From cb9cadad578297ffd78fa8a33670bdf1ab669e7e Mon Sep 17 00:00:00 2001 From: Ed Warnicke Date: Tue, 8 Dec 2015 15:45:58 -0700 Subject: Initial commit of vpp code. Change-Id: Ib246f1fbfce93274020ee93ce461e3d8bd8b9f17 Signed-off-by: Ed Warnicke --- dpdk/Makefile | 174 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 dpdk/Makefile (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile new file mode 100644 index 00000000..c33cf63c --- /dev/null +++ b/dpdk/Makefile @@ -0,0 +1,174 @@ +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Scripts require non-POSIX parts of bash +SHELL := /bin/bash + +DPDK_BUILD_DIR ?= $(CURDIR)/_build +DPDK_INSTALL_DIR ?= $(CURDIR)/_install +DPDK_PKTMBUF_HEADROOM ?= 256 +DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads +DPDK_MARCH ?= native +DPDK_DEBUG ?= n + +B := $(DPDK_BUILD_DIR) +I := $(DPDK_INSTALL_DIR) +DPDK_VERSION := 2.1.0 +DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz +DPDK_TAR_URL := http://dpdk.org/browse/dpdk/snapshot/$(DPDK_TARBALL) +DPDK_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 +DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) +DPDK_TARGET := x86_64-native-linuxapp-gcc +JOBS := $(shell grep processor /proc/cpuinfo | wc -l) + +# compiler/linker custom arguments +DPDK_CPU_CFLAGS := -pie -fPIC +DPDK_CPU_LDFLAGS := -pie -fPIC +DPDK_EXTRA_LDFLAGS := -g + +ifeq ($(DPDK_DEBUG),n) +DPDK_EXTRA_CFLAGS := -g +else +DPDK_EXTRA_CFLAGS := -g -O0 +endif + +# translate gcc march values to DPDK arch +ifeq ($(DPDK_MARCH),native) +DPDK_MACHINE:=native # autodetect host CPU +else ifeq ($(DPDK_MARCH),corei7) +DPDK_MACHINE:=nhm # Nehalem / Westmere +else ifeq ($(DPDK_MARCH),corei7-avx) +DPDK_MACHINE:=snb # Sandy Bridge +else ifeq ($(DPDK_MARCH),core-avx-i) +DPDK_MACHINE:=ivb # Ivy Bridge +else ifeq ($(DPDK_MARCH),core-avx2) +DPDK_MACHINE:=hsw # Haswell +else +$(error Unknown DPDK_MARCH) +endif + +# assemble DPDK make arguments +DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ + T=$(DPDK_TARGET) \ + RTE_CONFIG_TEMPLATE=../custom-config \ + RTE_OUTPUT=$(I) \ + EXTRA_CFLAGS="$(DPDK_EXTRA_CFLAGS)" \ + EXTRA_LDFLAGS="$(DPDK_EXTRA_LDFLAGS)" \ + CPU_CFLAGS="$(DPDK_CPU_CFLAGS)" \ + CPU_LDFLAGS="$(DPDK_CPU_LDFLAGS)" + +DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") + +define set +@if grep -q CONFIG_$1 $@ ; \ + then sed -i -e 's/.*\(CONFIG_$1=\).*/\1$2/' $@ ; \ + else echo CONFIG_$1=$2 >> $@ ; \ +fi +endef + +all: build + +$(B)/custom-config: $(B)/.patch.ok Makefile + @echo --- generating custom config from $(DPDK_SOURCE)/config/common_linuxapp --- + @cp $(DPDK_SOURCE)/config/common_linuxapp $@ + $(call set,RTE_MACHINE,$(DPDK_MACHINE)) + $(call set,RTE_ARCH,"x86_64") + $(call set,RTE_ARCH_X86_64,y) + $(call set,RTE_ARCH_64,y) + $(call set,RTE_TOOLCHAIN_GCC,y) + $(call set,RTE_TOOLCHAIN,"gcc") + @# modify options + $(call set,RTE_PKTMBUF_HEADROOM,$(DPDK_PKTMBUF_HEADROOM)) + $(call set,RTE_LIBEAL_USE_HPET,y) + $(call 
set,RTE_BUILD_COMBINE_LIBS,y) + $(call set,RTE_LIBRTE_I40E_16BYTE_RX_DESC,y) + $(call set,RTE_LIBRTE_I40E_ITR_INTERVAL,16) + @# enable debug init for device drivers + $(call set,RTE_LIBRTE_I40E_DEBUG_INIT,$(DPDK_DEBUG)) + $(call set,RTE_LIBRTE_IXGBE_DEBUG_INIT,$(DPDK_DEBUG)) + $(call set,RTE_LIBRTE_E1000_DEBUG_INIT,$(DPDK_DEBUG)) + $(call set,RTE_LIBRTE_VIRTIO_DEBUG_INIT,$(DPDK_DEBUG)) + $(call set,RTE_LIBRTE_VMXNET3_DEBUG_INIT,$(DPDK_DEBUG)) + @# not needed + $(call set,RTE_LIBRTE_PMD_BOND,n) + $(call set,RTE_LIBRTE_TIMER,n) + $(call set,RTE_LIBRTE_CFGFILE,n) + $(call set,RTE_LIBRTE_LPM,n) + $(call set,RTE_LIBRTE_ACL,n) + $(call set,RTE_LIBRTE_POWER,n) + $(call set,RTE_LIBRTE_IP_FRAG,n) + $(call set,RTE_LIBRTE_DISTRIBUTOR,n) + $(call set,RTE_LIBRTE_REORDER,n) + $(call set,RTE_LIBRTE_PORT,n) + $(call set,RTE_LIBRTE_TABLE,n) + $(call set,RTE_LIBRTE_PIPELINE,n) + $(call set,RTE_KNI_KMOD,n) + @rm -f .config.ok + +$(CURDIR)/$(DPDK_TARBALL): + @mkdir -p $(B) + @if [ -e $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) ] ; \ + then cp $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) $(CURDIR) ; \ + else curl -o $(CURDIR)/$(DPDK_TARBALL) -LO $(DPDK_TAR_URL) ; \ + fi + @rm -f $(B)/.download.ok + +$(B)/.download.ok: $(CURDIR)/$(DPDK_TARBALL) + @openssl md5 $< | cut -f 2 -d " " - > $(B)/$(DPDK_TARBALL).md5sum + @([ "$$(<$(B)/$(DPDK_TARBALL).md5sum)" = "$(DPDK_TARBALL_MD5_CKSUM)" ] || \ + ( echo "Bad Checksum! Please remove $< and retry" && \ + rm $(B)/$(DPDK_TARBALL).md5sum && false )) + @touch $@ + +.PHONY: download +download: $(B)/.download.ok + +$(B)/.extract.ok: $(B)/.download.ok + @echo --- extracting $(DPDK_TARBALL) --- + @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) --gzip + @touch $@ + +.PHONY: extract +extract: $(B)/.extract.ok + +$(B)/.patch.ok: $(B)/.extract.ok + @echo --- patching --- + for f in $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch ; do \ + echo Applying patch: $$(basename $$f) ; \ + patch -p1 -d $(DPDK_SOURCE) < $$f ; \ + done + @touch $@ + +.PHONY: patch +patch: $(B)/.patch.ok + +$(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config + @make $(DPDK_MAKE_ARGS) config + @touch $@ + +.PHONY: config +config: $(B)/.config.ok + +$(B)/.build.ok: $(DPDK_SOURCE_FILES) + @if [ ! 
-e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi + @make $(DPDK_MAKE_ARGS) install + @dkms/create_deb_manifest.sh $(DPDK_VERSION) $(subst $(realpath ..)/,,$(B)) + @touch $@ + +.PHONY: build +build: $(B)/.build.ok + +.PHONY: clean +clean: + @rm -rf $(B) $(I) + -- cgit 1.2.3-korg From bce6b6a6bfbedb4209a7f20c65ebcb018e0432ff Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 16 Dec 2015 04:27:37 +0100 Subject: Introduce support for DPDK 2.2 Change-Id: Id2c7c4a949363a448a6715463dd81f701f046b93 Signed-off-by: Damjan Marion --- dpdk/Makefile | 7 +- ...1-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch | 75 +++++++++++++++++++ ...a-bit-longer-for-autonegotiation-to-leave.patch | 25 +++++++ ...Cleanup-virtio-pmd-debug-log-output-reset.patch | 76 ++++++++++++++++++++ ...f-rearrange-rte_mbuf-metadata-to-suit-vpp.patch | 83 ++++++++++++++++++++++ dpdk/dpdk-2.2.0_patches/0005-missing-include.patch | 24 +++++++ vnet/vnet/devices/dpdk/device.c | 19 ++--- vnet/vnet/devices/dpdk/dpdk_priv.h | 22 ++++++ 8 files changed, 319 insertions(+), 12 deletions(-) create mode 100644 dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch create mode 100644 dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch create mode 100644 dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch create mode 100644 dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch create mode 100644 dpdk/dpdk-2.2.0_patches/0005-missing-include.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index c33cf63c..b95ed766 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -23,10 +23,11 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION := 2.1.0 +DPDK_VERSION ?= 2.1.0 DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := http://dpdk.org/browse/dpdk/snapshot/$(DPDK_TARBALL) -DPDK_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 +DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 +DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) DPDK_TARGET := x86_64-native-linuxapp-gcc JOBS := $(shell grep processor /proc/cpuinfo | wc -l) @@ -125,7 +126,7 @@ $(CURDIR)/$(DPDK_TARBALL): $(B)/.download.ok: $(CURDIR)/$(DPDK_TARBALL) @openssl md5 $< | cut -f 2 -d " " - > $(B)/$(DPDK_TARBALL).md5sum - @([ "$$(<$(B)/$(DPDK_TARBALL).md5sum)" = "$(DPDK_TARBALL_MD5_CKSUM)" ] || \ + @([ "$$(<$(B)/$(DPDK_TARBALL).md5sum)" = "$(DPDK_$(DPDK_VERSION)_TARBALL_MD5_CKSUM)" ] || \ ( echo "Bad Checksum! 
Please remove $< and retry" && \ rm $(B)/$(DPDK_TARBALL).md5sum && false )) @touch $@ diff --git a/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch new file mode 100644 index 00000000..6ed2fc61 --- /dev/null +++ b/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch @@ -0,0 +1,75 @@ +From 4a599535445d16a1c55fac0bd71edc443c6c23f2 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:21:21 +0100 +Subject: [PATCH 1/4] e1000: Set VLAN Rx Offload tag correctly + +--- + drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++ + lib/librte_ether/rte_ether.h | 3 +++ + 2 files changed, 33 insertions(+) + +diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c +index 996e7da..cbe80a1 100644 +--- a/drivers/net/e1000/igb_rxtx.c ++++ b/drivers/net/e1000/igb_rxtx.c +@@ -910,6 +910,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ { ++ /* ++ * Check packet for VLAN ethernet types and set ++ * RX Offload flag PKT_RX_VLAN_PKT accordingly. ++ */ ++ struct ether_hdr *eth_hdr = ++ rte_pktmbuf_mtod(rxm, struct ether_hdr *); ++ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); ++ ++ if ((eth_type == ETHER_TYPE_VLAN) || ++ (eth_type == ETHER_TYPE_VLAN_AD) || ++ (eth_type == ETHER_TYPE_VLAN_9100) || ++ (eth_type == ETHER_TYPE_VLAN_9200)) ++ pkt_flags |= PKT_RX_VLAN_PKT; ++ } + rxm->ol_flags = pkt_flags; + rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. + lo_dword.hs_rss.pkt_info); +@@ -1146,6 +1161,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ { ++ /* ++ * Check packet for VLAN ethernet types and set ++ * RX Offload flag PKT_RX_VLAN_PKT accordingly. ++ */ ++ struct ether_hdr *eth_hdr = ++ rte_pktmbuf_mtod(rxm, struct ether_hdr *); ++ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); ++ ++ if ((eth_type == ETHER_TYPE_VLAN) || ++ (eth_type == ETHER_TYPE_VLAN_AD) || ++ (eth_type == ETHER_TYPE_VLAN_9100) || ++ (eth_type == ETHER_TYPE_VLAN_9200)) ++ pkt_flags |= PKT_RX_VLAN_PKT; ++ } + first_seg->ol_flags = pkt_flags; + first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. + lower.lo_dword.hs_rss.pkt_info); +diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h +index 07c17d7..fd646ec 100644 +--- a/lib/librte_ether/rte_ether.h ++++ b/lib/librte_ether/rte_ether.h +@@ -332,6 +332,9 @@ struct vxlan_hdr { + #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ + #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ + #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ ++#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */ ++#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */ ++#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */ + + #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) + /**< VXLAN tunnel header length. 
*/ +-- +2.5.0 + diff --git a/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch new file mode 100644 index 00000000..b7a50298 --- /dev/null +++ b/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch @@ -0,0 +1,25 @@ +From 009cd67e5b1ed0592de0fb6ae2fa662ffc172dde Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:22:11 +0100 +Subject: [PATCH 2/4] ixgbe: Wait a bit longer for autonegotiation to leave + +--- + drivers/net/ixgbe/base/ixgbe_82599.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c +index f0deb59..ae66380 100644 +--- a/drivers/net/ixgbe/base/ixgbe_82599.c ++++ b/drivers/net/ixgbe/base/ixgbe_82599.c +@@ -2442,7 +2442,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + /* Wait for AN to leave state 0 */ +- for (i = 0; i < 10; i++) { ++ for (i = 0; i < 50; i++) { + msec_delay(4); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) +-- +2.5.0 + diff --git a/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch new file mode 100644 index 00000000..874f666b --- /dev/null +++ b/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch @@ -0,0 +1,76 @@ +From e2592eb622c33791d8ae51153360bd8249bdd056 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:29:22 +0100 +Subject: [PATCH 3/4] virtio: Cleanup virtio pmd debug log output, reset + +--- + drivers/net/virtio/virtio_ethdev.c | 10 +++++----- + drivers/net/virtio/virtio_rxtx.c | 4 +++- + 2 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index d928339..2fa1587 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -1635,24 +1635,24 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet + link.link_speed = SPEED_10G; + + if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { +- PMD_INIT_LOG(DEBUG, "Get link status from hw"); + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, status), + &status, sizeof(status)); + if ((status & VIRTIO_NET_S_LINK_UP) == 0) { + link.link_status = 0; +- PMD_INIT_LOG(DEBUG, "Port %d is down", +- dev->data->port_id); + } else { + link.link_status = 1; +- PMD_INIT_LOG(DEBUG, "Port %d is up", +- dev->data->port_id); + } + } else { + link.link_status = 1; /* Link up */ + } + virtio_dev_atomic_write_link_status(dev, &link); + ++ /* This message is far too noisy for normal use */ ++ if (0) ++ PMD_INIT_LOG(DEBUG, "Port %d is %s\n", dev->data->port_id, ++ link.link_status ? "up" : "down"); ++ + return (old.link_status == link.link_status) ? 
-1 : 0; + } + +diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c +index 74b39ef..2512bce 100644 +--- a/drivers/net/virtio/virtio_rxtx.c ++++ b/drivers/net/virtio/virtio_rxtx.c +@@ -618,6 +618,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rxm->next = NULL; + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); ++ rxm->ol_flags = 0; + + if (hw->vlan_strip) + rte_vlan_strip(rxm); +@@ -737,6 +738,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, + rxm->vlan_tci = 0; + rxm->pkt_len = (uint32_t)(len[0] - hdr_size); + rxm->data_len = (uint16_t)(len[0] - hdr_size); ++ rxm->ol_flags = 0; + + rxm->port = rxvq->port_id; + rx_pkts[nb_rx] = rxm; +@@ -838,7 +840,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + if (unlikely(nb_pkts < 1)) + return nb_pkts; + +- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); ++ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts); + nb_used = VIRTQUEUE_NUSED(txvq); + + virtio_rmb(); +-- +2.5.0 + diff --git a/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch new file mode 100644 index 00000000..bee64dff --- /dev/null +++ b/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch @@ -0,0 +1,83 @@ +From b8b575a3398c480f6e02525a0933e5e057139b78 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 04:25:23 +0100 +Subject: [PATCH 4/4] mbuf: rearrange rte_mbuf metadata to suit vpp + +--- + .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++-- + lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++-------- + 2 files changed, 15 insertions(+), 10 deletions(-) + +diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h +index bd1cc09..a68a949 100644 +--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h ++++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h +@@ -120,11 +120,12 @@ struct rte_kni_mbuf { + char pad2[4]; + uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ ++ char pad3[8]; ++ void *next; + + /* fields on second cache line */ +- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); ++ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); + void *pool; +- void *next; + }; + + /* +diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h +index f234ac9..a1b4405 100644 +--- a/lib/librte_mbuf/rte_mbuf.h ++++ b/lib/librte_mbuf/rte_mbuf.h +@@ -734,6 +734,12 @@ struct rte_mbuf_offload; + /** + * The generic rte_mbuf, containing a packet mbuf. + */ ++/* ++ * offload in the second cache line, next in the first. Better for vpp ++ * at least as of right now. ++ * If you change this structure, you must change the user-mode ++ * version in rte_mbuf.h ++ */ + struct rte_mbuf { + MARKER cacheline0; + +@@ -786,6 +792,12 @@ struct rte_mbuf { + uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */ ++ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ ++ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ ++ struct rte_mbuf *next; /**< Next segment of scattered packet. 
*/ ++ ++ /* second cache line - fields only used in slow path or on TX */ ++ MARKER cacheline1 __rte_cache_aligned; + + union { + uint32_t rss; /**< RSS hash result if RSS enabled */ +@@ -809,20 +821,12 @@ struct rte_mbuf { + uint32_t usr; /**< User defined tags. See rte_distributor_process() */ + } hash; /**< hash information */ + +- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ +- +- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ +- +- /* second cache line - fields only used in slow path or on TX */ +- MARKER cacheline1 __rte_cache_aligned; +- + union { + void *userdata; /**< Can be used for external metadata */ + uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ + }; + + struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ +- struct rte_mbuf *next; /**< Next segment of scattered packet. */ + + /* fields to support TX offloads */ + union { +-- +2.5.0 + diff --git a/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch b/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch new file mode 100644 index 00000000..c6211cd4 --- /dev/null +++ b/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch @@ -0,0 +1,24 @@ +From a8767269f3ee545141e83e5a5f62ff24c29248a9 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 04:43:40 +0100 +Subject: [PATCH 5/5] missing include + +--- + lib/librte_eal/linuxapp/eal/eal_timer.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c +index 9ceff33..d0792be 100644 +--- a/lib/librte_eal/linuxapp/eal/eal_timer.c ++++ b/lib/librte_eal/linuxapp/eal/eal_timer.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + + #include "eal_private.h" + #include "eal_internal_cfg.h" +-- +2.5.0 + diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c index a19c3131..781fff46 100644 --- a/vnet/vnet/devices/dpdk/device.c +++ b/vnet/vnet/devices/dpdk/device.c @@ -988,15 +988,16 @@ static u8 * format_dpdk_device (u8 * s, va_list * args) rte_eth_dev_rss_hash_conf_get(xd->device_index, &rss_conf); pci = di.pci_dev; - s = format(s, "%Upci id: device %04x:%04x subsystem %04x:%04x\n" - "%Upci address: %04x:%02x:%02x.%02x\n", - format_white_space, indent + 2, - pci->id.vendor_id, pci->id.device_id, - pci->id.subsystem_vendor_id, - pci->id.subsystem_device_id, - format_white_space, indent + 2, - pci->addr.domain, pci->addr.bus, - pci->addr.devid, pci->addr.function); + if (pci) + s = format(s, "%Upci id: device %04x:%04x subsystem %04x:%04x\n" + "%Upci address: %04x:%02x:%02x.%02x\n", + format_white_space, indent + 2, + pci->id.vendor_id, pci->id.device_id, + pci->id.subsystem_vendor_id, + pci->id.subsystem_device_id, + format_white_space, indent + 2, + pci->addr.domain, pci->addr.bus, + pci->addr.devid, pci->addr.function); s = format(s, "%Umax rx packet len: %d\n", format_white_space, indent + 2, di.max_rx_pktlen); s = format(s, "%Upromiscuous: unicast %s all-multicast %s\n", diff --git a/vnet/vnet/devices/dpdk/dpdk_priv.h b/vnet/vnet/devices/dpdk/dpdk_priv.h index e452e02d..314e2806 100644 --- a/vnet/vnet/devices/dpdk/dpdk_priv.h +++ b/vnet/vnet/devices/dpdk/dpdk_priv.h @@ -22,6 +22,22 @@ #define DPDK_NB_RX_DESC_40GE (4096-128) #define DPDK_NB_TX_DESC_40GE 2048 +#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0) +#define foreach_dpdk_counter \ + _ (tx_frames_ok, opackets) \ + _ (tx_bytes_ok, obytes) \ + _ (tx_errors, oerrors) \ + _ (tx_loopback_frames_ok, olbpackets) \ + _ 
(tx_loopback_bytes_ok, olbbytes) \ + _ (rx_frames_ok, ipackets) \ + _ (rx_bytes_ok, ibytes) \ + _ (rx_errors, ierrors) \ + _ (rx_missed, imissed) \ + _ (rx_multicast_frames_ok, imcasts) \ + _ (rx_no_bufs, rx_nombuf) \ + _ (rx_loopback_frames_ok, ilbpackets) \ + _ (rx_loopback_bytes_ok, ilbbytes) +#else #define foreach_dpdk_counter \ _ (tx_frames_ok, opackets) \ _ (tx_bytes_ok, obytes) \ @@ -44,6 +60,7 @@ _ (rx_pause_xoff, rx_pause_xoff) \ _ (rx_loopback_frames_ok, ilbpackets) \ _ (rx_loopback_bytes_ok, ilbbytes) +#endif #define foreach_dpdk_q_counter \ _ (rx_frames_ok, q_ipackets) \ @@ -412,10 +429,15 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) xd->stats.imissed - xd->last_stats.imissed); } +#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0) + rxerrors = xd->stats.ierrors; + last_rxerrors = xd->last_stats.ierrors; +#else rxerrors = xd->stats.ibadcrc + xd->stats.ibadlen + xd->stats.ierrors; last_rxerrors = xd->last_stats.ibadcrc + xd->last_stats.ibadlen + xd->last_stats.ierrors; +#endif if (PREDICT_FALSE (rxerrors != last_rxerrors)) { -- cgit 1.2.3-korg From eec5a51c122dcccaf4b9e6f9899a7f3061ca9853 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 2 Feb 2016 15:37:58 +0100 Subject: Change default dpdk version to 2.2 Change-Id: Iea686ed26d37539d60e52ac1205517512091edab Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index b95ed766..24b78cec 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -23,7 +23,7 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 2.1.0 +DPDK_VERSION ?= 2.2.0 DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := http://dpdk.org/browse/dpdk/snapshot/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 -- cgit 1.2.3-korg From 13f0718e643c3ddd9474ce45aebe3c568d26daef Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Fri, 5 Feb 2016 06:44:54 -0800 Subject: Increase number of dpdk lcores to 256 Change-Id: Ia0e228d02fd9f180ed9ca468074ab38d575e415e Signed-off-by: Damjan Marion --- dpdk/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 24b78cec..d7d46713 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -89,6 +89,7 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_TOOLCHAIN_GCC,y) $(call set,RTE_TOOLCHAIN,"gcc") @# modify options + $(call set,RTE_MAX_LCORE,256) $(call set,RTE_PKTMBUF_HEADROOM,$(DPDK_PKTMBUF_HEADROOM)) $(call set,RTE_LIBEAL_USE_HPET,y) $(call set,RTE_BUILD_COMBINE_LIBS,y) -- cgit 1.2.3-korg From d9bf9abbabac7ea637a25461757303a92e321f7e Mon Sep 17 00:00:00 2001 From: John Lo Date: Thu, 25 Feb 2016 11:17:55 -0500 Subject: Add support of Ethernet link bonding utilizing DPDK link bonding poll mode driver library. The bonded interfaces to be created on VPP startup is specified in the dpdk section of startup.conf or qn.conf, using DPDK EAL command. 
Following is an example of a dpdk section white listing PCI addressses of 4 ethernet interfacess to be under VPP control plus two bonded interface and the PCI addresses of the slaves in each: dpdk { socket-mem 1024,1024 dev 0000:0f:00.0 dev 0000:10:00.0 dev 0000:11:00.0 dev 0000:12:00.0 vdev eth_bond0,mode=2,slave=0000:0f:00.0,slave=0000:11:00.0,xmit_policy=l34 vdev eth_bond1,mode=2,slave=0000:10:00.0,slave=0000:12:00.0,xmit_policy=l34 } Note that only balance XOR (mode 2) is supported and "xmit_policy=l34" specifies to use layer 3 SIP/DIP and layer 4 Sport/Dport for load balance. Using "xmit_policy=l2" for SMAC/DMAC or "xmit_policy=l23" for SMAC/DMAC and SIP/DIP should also work. Change-Id: Iaf6438686fa20cce893cb5a823b76e2886b4360b Signed-off-by: John Lo --- dpdk/Makefile | 4 ++-- vnet/vnet/devices/dpdk/device.c | 11 +++++++-- vnet/vnet/devices/dpdk/dpdk.h | 3 +++ vnet/vnet/devices/dpdk/init.c | 49 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+), 4 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index d7d46713..307faf06 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -101,14 +101,14 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_E1000_DEBUG_INIT,$(DPDK_DEBUG)) $(call set,RTE_LIBRTE_VIRTIO_DEBUG_INIT,$(DPDK_DEBUG)) $(call set,RTE_LIBRTE_VMXNET3_DEBUG_INIT,$(DPDK_DEBUG)) + $(call set,RTE_LIBRTE_PMD_BOND,y) + $(call set,RTE_LIBRTE_IP_FRAG,y) @# not needed - $(call set,RTE_LIBRTE_PMD_BOND,n) $(call set,RTE_LIBRTE_TIMER,n) $(call set,RTE_LIBRTE_CFGFILE,n) $(call set,RTE_LIBRTE_LPM,n) $(call set,RTE_LIBRTE_ACL,n) $(call set,RTE_LIBRTE_POWER,n) - $(call set,RTE_LIBRTE_IP_FRAG,n) $(call set,RTE_LIBRTE_DISTRIBUTOR,n) $(call set,RTE_LIBRTE_REORDER,n) $(call set,RTE_LIBRTE_PORT,n) diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c index 327f9dec..08fe27e5 100644 --- a/vnet/vnet/devices/dpdk/device.c +++ b/vnet/vnet/devices/dpdk/device.c @@ -812,6 +812,9 @@ static u8 * format_dpdk_device_name (u8 * s, va_list * args) device_name = "FortyGigabitEthernet"; break; + case VNET_DPDK_PORT_TYPE_ETH_BOND: + return format(s, "BondEthernet%d", dm->devices[i].device_index); + case VNET_DPDK_PORT_TYPE_ETH_SWITCH: device_name = "EthernetSwitch"; break; @@ -926,8 +929,12 @@ static u8 * format_dpdk_device_type (u8 * s, va_list * args) #endif case VNET_DPDK_PMD_AF_PACKET: - dev_type = "af_packet"; - break; + dev_type = "af_packet"; + break; + + case VNET_DPDK_PMD_BOND: + dev_type = "Ethernet Bonding"; + break; default: case VNET_DPDK_PMD_UNKNOWN: diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h index da4e381b..f17c53c7 100644 --- a/vnet/vnet/devices/dpdk/dpdk.h +++ b/vnet/vnet/devices/dpdk/dpdk.h @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -90,6 +91,7 @@ typedef enum { _ ("rte_enic_pmd", ENIC) \ _ ("rte_vmxnet3_pmd", VMXNET3) \ _ ("AF_PACKET PMD", AF_PACKET) \ + _ ("rte_bond_pmd", BOND) \ _ ("rte_pmd_fm10k", FM10K) \ _ ("rte_cxgbe_pmd", CXGBE) @@ -108,6 +110,7 @@ typedef enum { VNET_DPDK_PORT_TYPE_ETH_1G, VNET_DPDK_PORT_TYPE_ETH_10G, VNET_DPDK_PORT_TYPE_ETH_40G, + VNET_DPDK_PORT_TYPE_ETH_BOND, VNET_DPDK_PORT_TYPE_ETH_SWITCH, #ifdef NETMAP VNET_DPDK_PORT_TYPE_NETMAP, diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index c9187d5e..f958f81e 100644 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -465,6 +465,10 @@ dpdk_lib_init (dpdk_main_t * dm) xd->af_packet_port_id = af_packet_port_id++; 
break; + case VNET_DPDK_PMD_BOND: + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_BOND; + break; + default: xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; } @@ -1589,7 +1593,9 @@ dpdk_process (vlib_main_t * vm, vlib_frame_t * f) { clib_error_t * error; + vnet_main_t * vnm = vnet_get_main(); dpdk_main_t * dm = &dpdk_main; + ethernet_main_t * em = ðernet_main; dpdk_device_t * xd; vlib_thread_main_t * tm = vlib_get_thread_main(); void *vu_state; @@ -1630,6 +1636,45 @@ dpdk_process (vlib_main_t * vm, dpdk_update_link_state (xd, now); } +{ // Setup MACs for bond interfaces and their links which was initialized in + // dpdk_port_setup() but needs to be done again here to take effect. + int nports = rte_eth_dev_count(); + if (nports > 0) { + for (i = 0; i < nports; i++) { + struct rte_eth_dev_info dev_info; + rte_eth_dev_info_get(i, &dev_info); + if (!dev_info.driver_name) + dev_info.driver_name = dev_info.pci_dev->driver->name; + ASSERT(dev_info.driver_name); + if (strncmp(dev_info.driver_name, "rte_bond_pmd", 12) == 0) { + u8 addr[6]; + u8 slink[16]; + int nlink = rte_eth_bond_slaves_get(i, slink, 16); + if (nlink > 0) { + vnet_hw_interface_t * hi; + ethernet_interface_t * ei; + /* Get MAC of 1st slave link */ + rte_eth_macaddr_get(slink[0], (struct ether_addr *)addr); + /* Set MAC of bounded interface to that of 1st slave link */ + rte_eth_bond_mac_address_set(i, (struct ether_addr *)addr); + /* Populate MAC of bonded interface in VPP hw tables */ + hi = vnet_get_hw_interface ( + vnm, dm->devices[i].vlib_hw_if_index); + ei = pool_elt_at_index (em->interfaces, hi->hw_instance); + memcpy (hi->hw_address, addr, 6); + memcpy (ei->address, addr, 6); + /* Add MAC to other slave links */ + while (nlink > 1) { + nlink--; + rte_eth_dev_mac_addr_add( + slink[nlink], (struct ether_addr *)addr, 0); + } + } + } + } + } +} + while (1) { vlib_process_wait_for_event_or_clock (vm, 5.0); @@ -1733,6 +1778,10 @@ do { \ _(rte_cxgbe_driver) #endif +#ifdef RTE_LIBRTE_PMD_BOND + _(bond_drv) +#endif + #undef _ /* -- cgit 1.2.3-korg From 966a8b868b33de72d5d9edb6317418165c62661c Mon Sep 17 00:00:00 2001 From: Jean-Mickael Guerin Date: Wed, 2 Mar 2016 19:05:05 +0100 Subject: dpdk: fix link error /usr/bin/ld: -f may not be used without -shared collect2: error: ld returned 1 exit status Indeed an extra "-fPIC" is given to ld, see the -Wl,-fPIC below: gcc -pie -fPIC -pthread -march=native -DRTE_MACHINE_CPUFLAG_SSE -DRTE_MACHINE_CPUFLAG_SSE2 -DRTE_MACHINE_CPUFLAG_SSE3 -DRTE_MACHINE_CPUFLAG_SSSE3 -DRTE_MACHINE_CPUFLAG_SSE4_1 -DRTE_MACHINE_CPUFLAG_SSE4_2 -DRTE_MACHINE_CPUFLAG_AES -DRTE_MACHINE_CPUFLAG_PCLMULQDQ -DRTE_MACHINE_CPUFLAG_AVX -DRTE_COMPILE_TIME_CPUFLAGS=RTE_CPUFLAG_SSE,RTE_CPUFLAG_SSE2,RTE_CPUFLAG_SSE3,RTE_CPUFLAG_SSSE3,RTE_CPUFLAG_SSE4_1,RTE_CPUFLAG_SSE4_2,RTE_CPUFLAG_AES,RTE_CPUFLAG_PCLMULQDQ,RTE_CPUFLAG_AVX -I/home/jmg/dev/vpp/build-root/install-vpp_debug-native/dpdk/include -include /home/jmg/dev/vpp/build-root/install-vpp_debug-native/dpdk/include/rte_config.h -O3 -W -Wall -Werror -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wold-style-definition -Wpointer-arith -Wcast-align -Wnested-externs -Wcast-qual -Wformat-nonliteral -Wformat-security -Wundef -Wwrite-strings -D_GNU_SOURCE -Wl,-Map=test.map,--cref -o test commands.o test.o test_pci.o test_prefetch.o test_byteorder.o test_per_lcore.o test_atomic.o test_malloc.o test_cycles.o test_spinlock.o test_memory.o test_memzone.o test_ring.o test_ring_perf.o test_pmd_perf.o test_rwlock.o test_mempool.o test_mempool_perf.o test_mbuf.o test_logs.o 
test_memcpy.o test_memcpy_perf.o test_hash.o test_thash.o test_hash_perf.o test_hash_functions.o test_hash_scaling.o test_debug.o test_errno.o test_tailq.o test_string_fns.o test_cpuflags.o test_mp_secondary.o test_eal_flags.o test_eal_fs.o test_alarm.o test_interrupts.o test_version.o test_func_reentrancy.o test_cmdline.o test_cmdline_num.o test_cmdline_etheraddr.o test_cmdline_portlist.o test_cmdline_ipaddr.o test_cmdline_cirbuf.o test_cmdline_string.o test_cmdline_lib.o test_red.o test_sched.o test_meter.o test_kni.o test_common.o test_devargs.o virtual_pmd.o packet_burst_generator.o test_link_bonding.o test_link_bonding_mode4.o test_link_bonding_rssconf.o test_pmd_ring.o test_pmd_ring_perf.o test_cryptodev_perf.o test_cryptodev.o test_kvargs.o -Wl,-pie -Wl,-fPIC -Wl,--no-as-needed -Wl,-export-dynamic -L/home/jmg/dev/vpp/build-root/install-vpp_debug-native/dpdk/lib -Wl,-g -L/home/jmg/dev/vpp/build-root/install-vpp_debug-native/dpdk/lib -Wl,--whole-archive -Wl,-ldpdk -Wl,--start-group -Wl,-lrt -Wl,-lm -Wl,-ldl -Wl,--end-group -Wl,--no-whole-archive Fixed by unsetting the flags -pie -fPIC out of LDFLAGS, these are not options for the linker. $ gcc -v Using built-in specs. COLLECT_GCC=gcc COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper Target: x86_64-linux-gnu Configured with: ../src/configure -v --with-pkgversion='Ubuntu 5.3.1-10ubuntu2' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,java,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --disable-browser-plugin --enable-java-awt=gtk --enable-gtk-cairo --with-java-home=/usr/lib/jvm/java-1.5.0-gcj-5-amd64/jre --enable-java-home --with-jvm-root-dir=/usr/lib/jvm/java-1.5.0-gcj-5-amd64 --with-jvm-jar-dir=/usr/lib/jvm-exports/java-1.5.0-gcj-5-amd64 --with-arch-directory=amd64 --with-ecj-jar=/usr/share/java/eclipse-ecj.jar --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu Thread model: posix gcc version 5.3.1 20160225 (Ubuntu 5.3.1-10ubuntu2) Change-Id: Iffbebfbb625e8831822ec092bea88dea42f12930 Signed-off-by: Jean-Mickael Guerin --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 307faf06..a07e8615 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -34,7 +34,7 @@ JOBS := $(shell grep processor /proc/cpuinfo | wc -l) # compiler/linker custom arguments DPDK_CPU_CFLAGS := -pie -fPIC -DPDK_CPU_LDFLAGS := -pie -fPIC +DPDK_CPU_LDFLAGS := DPDK_EXTRA_LDFLAGS := -g ifeq ($(DPDK_DEBUG),n) -- cgit 1.2.3-korg From 19010202285445372c281faca6a9757fd0ed466c Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Thu, 24 Mar 2016 17:17:47 +0100 Subject: Use rte_mempool private data for storing vlib_buffer_t Change-Id: If3fc88a35bc0b736376113a39667caea42802ea1 Signed-off-by: Damjan Marion --- build-data/platforms/virl.mk | 2 -- build-data/platforms/vpp.mk | 2 -- dpdk/Makefile | 2 +- vlib/vlib/buffer.h | 45 ++++++++++++++++++++------------- 
vlib/vlib/buffer_funcs.h | 37 +++++++++++++++++---------- vlib/vlib/dpdk_buffer.c | 51 +++++++++++++++++++++----------------- vnet/vnet/devices/af_packet/node.c | 8 +++--- vnet/vnet/devices/dpdk/device.c | 20 +++++++-------- vnet/vnet/devices/dpdk/dpdk.h | 1 - vnet/vnet/devices/dpdk/init.c | 4 +-- vnet/vnet/devices/dpdk/node.c | 32 ++++++++++++------------ vnet/vnet/devices/ssvm/ssvm_eth.h | 2 +- vnet/vnet/dpdk_replication.h | 6 ++--- vnet/vnet/ip/ip.h | 2 +- vnet/vnet/ip/ip6_forward.c | 2 +- vnet/vnet/pg/input.c | 6 ++--- vnet/vnet/unix/tapcli.c | 4 +-- vnet/vnet/unix/tuntap.c | 12 +++------ 18 files changed, 126 insertions(+), 112 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/platforms/virl.mk b/build-data/platforms/virl.mk index 29b47dad..f47f3691 100644 --- a/build-data/platforms/virl.mk +++ b/build-data/platforms/virl.mk @@ -24,8 +24,6 @@ vpp_configure_args_virl = --with-dpdk vnet_configure_args_virl = --with-dpdk --with-virl # Set these parameters carefully. The vlib_buffer_t is 128 bytes, i.e. -# dpdk_headroom = uiotarball_headroom = vlib_pre_data + 128 -dpdk_configure_args_virl = --with-headroom=256 vlib_configure_args_virl = --with-pre-data=128 # Override default -march and CONFIG_RTE_MACHINE settings diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 6b1ba82f..50c85dcc 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -24,8 +24,6 @@ vpp_configure_args_vpp = --with-dpdk vnet_configure_args_vpp = --with-dpdk # Set these parameters carefully. The vlib_buffer_t is 128 bytes, i.e. -# dpdk_headroom = uiotarball_headroom = vlib_pre_data + 128 -dpdk_configure_args_vpp = --with-headroom=256 vlib_configure_args_vpp = --with-pre-data=128 diff --git a/dpdk/Makefile b/dpdk/Makefile index a07e8615..dabbf6ad 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -16,7 +16,7 @@ SHELL := /bin/bash DPDK_BUILD_DIR ?= $(CURDIR)/_build DPDK_INSTALL_DIR ?= $(CURDIR)/_install -DPDK_PKTMBUF_HEADROOM ?= 256 +DPDK_PKTMBUF_HEADROOM ?= 128 DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_MARCH ?= native DPDK_DEBUG ?= n diff --git a/vlib/vlib/buffer.h b/vlib/vlib/buffer.h index 6322481b..9c148ef2 100644 --- a/vlib/vlib/buffer.h +++ b/vlib/vlib/buffer.h @@ -45,7 +45,16 @@ #include #include #include /* for vlib_error_t */ + +#if DPDK > 0 +#include +#define VLIB_BUFFER_DATA_SIZE (2048) +#define VLIB_BUFFER_PRE_DATA_SIZE RTE_PKTMBUF_HEADROOM +#else #include /* for __PRE_DATA_SIZE */ +#define VLIB_BUFFER_DATA_SIZE (512) +#define VLIB_BUFFER_PRE_DATA_SIZE __PRE_DATA_SIZE +#endif #ifdef CLIB_HAVE_VEC128 typedef u8x16 vlib_copy_unit_t; @@ -62,6 +71,7 @@ typedef uword vlib_copy_unit_t; /* VLIB buffer representation. */ typedef struct { + CLIB_CACHE_LINE_ALIGN_MARK(cacheline0); /* Offset within data[] that we are currently processing. If negative current header points into predata area. */ i16 current_data; /**< signed offset in data[], pre_data[] @@ -124,23 +134,25 @@ typedef struct { u32 opaque[8]; /**< Opaque data used by sub-graphs for their own purposes. See .../vnet/vnet/buffer.h */ - /***** end of first cache line */ + CLIB_CACHE_LINE_ALIGN_MARK(cacheline1); u32 opaque2[16]; /**< More opaque data, in its own cache line */ /***** end of second cache line */ - u8 pre_data [__PRE_DATA_SIZE]; /**< Space for inserting data - before buffer start. - Packet rewrite string will be - rewritten backwards and may extend - back before buffer->data[0]. - Must come directly before packet data. 
- */ - -#define VLIB_BUFFER_PRE_DATA_SIZE (ARRAY_LEN (((vlib_buffer_t *)0)->pre_data)) + CLIB_CACHE_LINE_ALIGN_MARK(cacheline2); + u8 pre_data [VLIB_BUFFER_PRE_DATA_SIZE]; /**< Space for inserting data + before buffer start. + Packet rewrite string will be + rewritten backwards and may extend + back before buffer->data[0]. + Must come directly before packet data. + */ + u8 data[0]; /**< Packet data. Hardware DMA here */ } vlib_buffer_t; /* Must be a multiple of 64B. */ +#define VLIB_BUFFER_HDR_SIZE (sizeof(vlib_buffer_t) - VLIB_BUFFER_PRE_DATA_SIZE) + /** \brief Prefetch buffer metadata. The first 64 bytes of buffer contains most header information @@ -283,13 +295,7 @@ typedef struct { initializing static data for each packet generated. */ vlib_buffer_free_list_t * buffer_free_list_pool; #define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX (0) - -#if DPDK == 1 -/* must be same as dpdk buffer size */ -#define VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES (2048) -#else -#define VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES (512) -#endif +#define VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES VLIB_BUFFER_DATA_SIZE /* Hash table mapping buffer size (rounded to next unit of sizeof (vlib_buffer_t)) to free list index. */ @@ -357,6 +363,11 @@ serialize_vlib_buffer_n_bytes (serialize_main_t * m) return sm->tx.n_total_data_bytes + s->current_buffer_index + vec_len (s->overflow_buffer); } +#if DPDK > 0 +#define rte_mbuf_from_vlib_buffer(x) (((struct rte_mbuf *)x) - 1) +#define vlib_buffer_from_rte_mbuf(x) ((vlib_buffer_t *)(x+1)) +#endif + /* */ diff --git a/vlib/vlib/buffer_funcs.h b/vlib/vlib/buffer_funcs.h index eea417a9..f7bdb12d 100644 --- a/vlib/vlib/buffer_funcs.h +++ b/vlib/vlib/buffer_funcs.h @@ -254,7 +254,7 @@ u8 * vlib_validate_buffers (vlib_main_t * vm, clib_error_t * vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs, - unsigned mbuf_size, unsigned socket_id); + unsigned socket_id); /** \brief Allocate buffers into supplied array @@ -425,11 +425,9 @@ vlib_buffer_chain_init(vlib_buffer_t *first) first->flags &= ~VLIB_BUFFER_NEXT_PRESENT; first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; #if DPDK == 1 - (((struct rte_mbuf *) first) - 1)->nb_segs = 1; - (((struct rte_mbuf *) first) - 1)->next = 0; - (((struct rte_mbuf *) first) - 1)->pkt_len = 0; - (((struct rte_mbuf *) first) - 1)->data_len = 0; - (((struct rte_mbuf *) first) - 1)->data_off = RTE_PKTMBUF_HEADROOM + first->current_data; + struct rte_mbuf * mb = rte_mbuf_from_vlib_buffer(first); + rte_pktmbuf_reset(mb); + mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + first->current_data; #endif } @@ -446,11 +444,17 @@ vlib_buffer_chain_buffer(vlib_main_t *vm, next_buffer->current_length = 0; next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT; #if DPDK == 1 - (((struct rte_mbuf *) first) - 1)->nb_segs++; - (((struct rte_mbuf *) last) - 1)->next = (((struct rte_mbuf *) next_buffer) - 1); - (((struct rte_mbuf *) next_buffer) - 1)->data_len = 0; - (((struct rte_mbuf *) next_buffer) - 1)->data_off = RTE_PKTMBUF_HEADROOM + next_buffer->current_data; - (((struct rte_mbuf *) next_buffer) - 1)->next = 0; + struct rte_mbuf * mb; + mb = rte_mbuf_from_vlib_buffer(first); + mb->nb_segs++; + + mb = rte_mbuf_from_vlib_buffer(last); + mb->next = rte_mbuf_from_vlib_buffer(next_buffer); + + mb = rte_mbuf_from_vlib_buffer(next_buffer); + mb->data_len = 0; + mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + next_buffer->current_data; + mb->next = 0; #endif return next_buffer; } @@ -468,8 +472,10 @@ vlib_buffer_chain_increase_length(vlib_buffer_t *first, if (first != last) 
first->total_length_not_including_first_buffer += len; #if DPDK == 1 - (((struct rte_mbuf *) first) - 1)->pkt_len += len; - (((struct rte_mbuf *) last) - 1)->data_len += len; + struct rte_mbuf * mb_first = rte_mbuf_from_vlib_buffer(first); + struct rte_mbuf * mb_last = rte_mbuf_from_vlib_buffer(last); + mb_first->pkt_len += len; + mb_last->data_len += len; #endif } @@ -589,6 +595,11 @@ vlib_buffer_init_for_free_list (vlib_buffer_t * _dst, vlib_buffer_union_t * dst = (vlib_buffer_union_t *) _dst; vlib_buffer_union_t * src = (vlib_buffer_union_t *) &fl->buffer_init_template; + /* Make sure vlib_buffer_t is cacheline aligned and sized */ + ASSERT(STRUCT_OFFSET_OF(vlib_buffer_t, cacheline0) == 0); + ASSERT(STRUCT_OFFSET_OF(vlib_buffer_t, cacheline1) == CLIB_CACHE_LINE_BYTES); + ASSERT(STRUCT_OFFSET_OF(vlib_buffer_t, cacheline2) == CLIB_CACHE_LINE_BYTES * 2); + /* Make sure buffer template is sane. */ ASSERT (fl->index == fl->buffer_init_template.free_list_index); diff --git a/vlib/vlib/dpdk_buffer.c b/vlib/vlib/dpdk_buffer.c index 04a6447d..db1fde18 100644 --- a/vlib/vlib/dpdk_buffer.c +++ b/vlib/vlib/dpdk_buffer.c @@ -66,12 +66,10 @@ #include -phys_addr_t __attribute__ ((weak)) rte_mem_virt2phy(); -int __attribute__ ((weak)) rte_eal_has_hugepages(); -unsigned __attribute__ ((weak)) rte_socket_id(); -struct rte_mempool * __attribute__ ((weak)) rte_mempool_create(); -void __attribute__ ((weak)) rte_pktmbuf_init(); -void __attribute__ ((weak)) rte_pktmbuf_pool_init(); +#pragma weak rte_mem_virt2phy +#pragma weak rte_eal_has_hugepages +#pragma weak rte_socket_id +#pragma weak rte_pktmbuf_pool_create uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm, vlib_buffer_t * b_first) { @@ -400,13 +398,13 @@ del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f) for (i = 0; i < vec_len (f->unaligned_buffers); i++) { b = vlib_get_buffer (vm, f->unaligned_buffers[i]); - mb = ((struct rte_mbuf *)b)-1; + mb = rte_mbuf_from_vlib_buffer(b); ASSERT(rte_mbuf_refcnt_read(mb) == 1); rte_pktmbuf_free (mb); } for (i = 0; i < vec_len (f->aligned_buffers); i++) { b = vlib_get_buffer (vm, f->aligned_buffers[i]); - mb = ((struct rte_mbuf *)b)-1; + mb = rte_mbuf_from_vlib_buffer(b); ASSERT(rte_mbuf_refcnt_read(mb) == 1); rte_pktmbuf_free (mb); } @@ -487,7 +485,7 @@ fill_free_list (vlib_main_t * vm, mb->data_off = RTE_PKTMBUF_HEADROOM; mb->nb_segs = 1; - b = (vlib_buffer_t *)(mb+1); + b = vlib_buffer_from_rte_mbuf(mb); bi = vlib_get_buffer_index (vm, b); vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t)); @@ -726,7 +724,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, { if (PREDICT_TRUE (b->clone_count == 0)) { - mb = ((struct rte_mbuf *)b)-1; + mb = rte_mbuf_from_vlib_buffer(b); ASSERT(rte_mbuf_refcnt_read(mb) == 1); rte_pktmbuf_free (mb); } @@ -820,7 +818,7 @@ vlib_packet_template_get_packet (vlib_main_t * vm, /* Fix up mbuf header length fields */ struct rte_mbuf * mb; - mb = ((struct rte_mbuf *)b) - 1; + mb = rte_mbuf_from_vlib_buffer(b); mb->data_len = b->current_length; mb->pkt_len = b->current_length; @@ -916,22 +914,26 @@ vlib_buffer_chain_append_data_with_alloc(vlib_main_t *vm, void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * b_first) { vlib_buffer_t *b = b_first, *prev = b_first; - struct rte_mbuf *mb_first = ((struct rte_mbuf *) b) - 1; + struct rte_mbuf *mb_prev, *mb, *mb_first; - mb_first->pkt_len = mb_first-> data_len = b_first->current_length; + mb_first = rte_mbuf_from_vlib_buffer(b_first); + + mb_first->pkt_len = mb_first->data_len = 
b_first->current_length; while (b->flags & VLIB_BUFFER_NEXT_PRESENT) { b = vlib_get_buffer(vm, b->next_buffer); + mb = rte_mbuf_from_vlib_buffer(b); + mb_prev = rte_mbuf_from_vlib_buffer(prev); mb_first->nb_segs++; mb_first->pkt_len += b->current_length; - (((struct rte_mbuf *) prev) - 1)->next = (((struct rte_mbuf *) b) - 1); - (((struct rte_mbuf *) b) - 1)->data_len = b->current_length; + mb_prev->next = mb; + mb->data_len = b->current_length; prev = b; } } clib_error_t * vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs, - unsigned mbuf_size, unsigned socket_id) + unsigned socket_id) { vlib_buffer_main_t * bm = vm->buffer_main; vlib_physmem_main_t * vpm = &vm->physmem_main; @@ -939,7 +941,7 @@ vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs, uword new_start, new_size; int i; - if (!rte_mempool_create) + if (!rte_pktmbuf_pool_create) return clib_error_return (0, "not linked with DPDK"); vec_validate_aligned(bm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES); @@ -949,12 +951,15 @@ vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs, return 0; u8 * pool_name = format(0, "mbuf_pool_socket%u%c",socket_id, 0); - rmp = rte_mempool_create((char *) pool_name, - num_mbufs, mbuf_size, 512, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - socket_id, 0); + + rmp = rte_pktmbuf_pool_create((char *) pool_name, /* pool name */ + num_mbufs, /* number of mbufs */ + 512, /* cache size */ + VLIB_BUFFER_HDR_SIZE, /* priv size */ + VLIB_BUFFER_PRE_DATA_SIZE + + VLIB_BUFFER_DATA_SIZE, /* dataroom size */ + socket_id); /* cpu socket */ + vec_free(pool_name); if (rmp) diff --git a/vnet/vnet/devices/af_packet/node.c b/vnet/vnet/devices/af_packet/node.c index 15a96f44..efe0e1c2 100644 --- a/vnet/vnet/devices/af_packet/node.c +++ b/vnet/vnet/devices/af_packet/node.c @@ -94,11 +94,6 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi) vlib_buffer_t * b = vlib_get_buffer (vm, bi); vlib_buffer_t * first_b = vlib_get_buffer (vm, first_bi); vlib_buffer_t * prev_b = vlib_get_buffer (vm, prev_bi); -#if DPDK > 0 - struct rte_mbuf * mbuf = ((struct rte_mbuf *) b) - 1; - struct rte_mbuf * first_mbuf = ((struct rte_mbuf *) first_b) - 1; - struct rte_mbuf * prev_mbuf = ((struct rte_mbuf *) prev_b) - 1; -#endif /* update first buffer */ first_b->total_length_not_including_first_buffer += b->current_length; @@ -111,6 +106,9 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi) b->next_buffer = 0; #if DPDK > 0 + struct rte_mbuf * mbuf = rte_mbuf_from_vlib_buffer(b); + struct rte_mbuf * first_mbuf = rte_mbuf_from_vlib_buffer(first_b); + struct rte_mbuf * prev_mbuf = rte_mbuf_from_vlib_buffer(prev_b); first_mbuf->nb_segs++; prev_mbuf->next = mbuf; mbuf->data_len = b->current_length; diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c index 02703cc5..dcf95803 100644 --- a/vnet/vnet/devices/dpdk/device.c +++ b/vnet/vnet/devices/dpdk/device.c @@ -87,7 +87,7 @@ static struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b) unsigned socket_id = rte_socket_id(); ASSERT (bm->pktmbuf_pools[socket_id]); - pkt_mb = ((struct rte_mbuf *)b)-1; + pkt_mb = rte_mbuf_from_vlib_buffer(b); nb_segs = pkt_mb->nb_segs; for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--) { @@ -159,7 +159,7 @@ dpdk_tx_trace_buffer (dpdk_main_t * dm, dpdk_tx_dma_trace_t * t0; struct rte_mbuf * mb; - mb = ((struct rte_mbuf *)buffer)-1; + mb = rte_mbuf_from_vlib_buffer(buffer); t0 = vlib_add_trace (vm, 
node, buffer, sizeof (t0[0])); t0->queue_index = queue_id; @@ -541,7 +541,7 @@ dpdk_interface_tx (vlib_main_t * vm, { u32 bi0 = from[n_packets]; vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0); - struct rte_mbuf *mb0 = ((struct rte_mbuf *)b0) - 1; + struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer(b0); rte_pktmbuf_free (mb0); } return n_on_ring; @@ -584,9 +584,9 @@ dpdk_interface_tx (vlib_main_t * vm, pref0 = vlib_get_buffer (vm, pi0); pref1 = vlib_get_buffer (vm, pi1); - prefmb0 = ((struct rte_mbuf *)pref0) - 1; - prefmb1 = ((struct rte_mbuf *)pref1) - 1; - + prefmb0 = rte_mbuf_from_vlib_buffer(pref0); + prefmb1 = rte_mbuf_from_vlib_buffer(pref1); + CLIB_PREFETCH(prefmb0, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH(pref0, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH(prefmb1, CLIB_CACHE_LINE_BYTES, LOAD); @@ -599,8 +599,8 @@ dpdk_interface_tx (vlib_main_t * vm, b0 = vlib_get_buffer (vm, bi0); b1 = vlib_get_buffer (vm, bi1); - mb0 = ((struct rte_mbuf *)b0) - 1; - mb1 = ((struct rte_mbuf *)b1) - 1; + mb0 = rte_mbuf_from_vlib_buffer(b0); + mb1 = rte_mbuf_from_vlib_buffer(b1); any_clone = b0->clone_count | b1->clone_count; if (PREDICT_FALSE(any_clone != 0)) @@ -701,7 +701,7 @@ dpdk_interface_tx (vlib_main_t * vm, b0 = vlib_get_buffer (vm, bi0); - mb0 = ((struct rte_mbuf *)b0) - 1; + mb0 = rte_mbuf_from_vlib_buffer(b0); if (PREDICT_FALSE(b0->clone_count != 0)) { struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0); @@ -921,7 +921,7 @@ dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) vlib_buffer_main_t * bm = vm->buffer_main; memset(&conf, 0, sizeof(conf)); snprintf(conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id); - conf.mbuf_size = MBUF_SIZE; + conf.mbuf_size = VLIB_BUFFER_DATA_SIZE; memset(&ops, 0, sizeof(ops)); ops.port_id = xd->kni_port_id; ops.change_mtu = kni_change_mtu; diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h index 656f39ea..14f7b3e8 100644 --- a/vnet/vnet/devices/dpdk/dpdk.h +++ b/vnet/vnet/devices/dpdk/dpdk.h @@ -62,7 +62,6 @@ #define always_inline static inline __attribute__ ((__always_inline__)) #endif -#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) #define NB_MBUF (32<<10) extern vnet_device_class_t dpdk_device_class; diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index 8fe95ae5..8bb253a3 100644 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -1405,13 +1405,13 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) rte_dump_physmem_layout(stdout); /* main thread 1st */ - error = vlib_buffer_pool_create(vm, dm->num_mbufs, MBUF_SIZE, rte_socket_id()); + error = vlib_buffer_pool_create(vm, dm->num_mbufs, rte_socket_id()); if (error) return error; for (i = 0; i < RTE_MAX_LCORE; i++) { - error = vlib_buffer_pool_create(vm, dm->num_mbufs, MBUF_SIZE, + error = vlib_buffer_pool_create(vm, dm->num_mbufs, rte_lcore_to_socket_id(i)); if (error) return error; diff --git a/vnet/vnet/devices/dpdk/node.c b/vnet/vnet/devices/dpdk/node.c index 72d564ba..4f5a84ae 100644 --- a/vnet/vnet/devices/dpdk/node.c +++ b/vnet/vnet/devices/dpdk/node.c @@ -366,7 +366,7 @@ void dpdk_rx_trace (dpdk_main_t * dm, n_left -= 1; b0 = vlib_get_buffer (vm, bi0); - mb = ((struct rte_mbuf *)b0) - 1; + mb = rte_mbuf_from_vlib_buffer(b0); dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0, &next0, &error0); vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0); @@ -602,20 +602,20 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, if 
(PREDICT_TRUE(n_buffers > 2)) { struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); } ASSERT(mb); - b0 = (vlib_buffer_t *)(mb+1); + b0 = vlib_buffer_from_rte_mbuf(mb); /* check whether EFD is looking for packets to discard */ if (PREDICT_FALSE(efd_discard_burst)) { vlib_thread_main_t * tm = vlib_get_thread_main(); - + if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb))) { rte_pktmbuf_free(mb); @@ -633,7 +633,7 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, if (PREDICT_FALSE(mb->nb_segs > 1)) { struct rte_mbuf *pfmb = mb->next; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); b_chain = b0; @@ -693,7 +693,7 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, { ASSERT(mb_seg != 0); - b_seg = (vlib_buffer_t *)(mb_seg+1); + b_seg = vlib_buffer_from_rte_mbuf(mb_seg); vlib_buffer_init_for_free_list (b_seg, fl); b_seg->clone_count = 0; @@ -1280,13 +1280,13 @@ void dpdk_io_thread (vlib_worker_thread_t * w, if (PREDICT_TRUE(n_buffers > 1)) { struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD); } - b0 = (vlib_buffer_t *)(mb+1); + b0 = vlib_buffer_from_rte_mbuf(mb); /* check whether EFD is looking for packets to discard */ if (PREDICT_FALSE(efd_discard_burst)) @@ -1310,7 +1310,7 @@ void dpdk_io_thread (vlib_worker_thread_t * w, if (PREDICT_FALSE(mb->nb_segs > 1)) { struct rte_mbuf *pfmb = mb->next; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); b_chain = b0; @@ -1361,7 +1361,7 @@ void dpdk_io_thread (vlib_worker_thread_t * w, { ASSERT(mb_seg != 0); - b_seg = (vlib_buffer_t *)(mb_seg+1); + b_seg = vlib_buffer_from_rte_mbuf(mb_seg); vlib_buffer_init_for_free_list (b_seg, fl); b_seg->clone_count = 0; @@ -1681,14 +1681,14 @@ dpdk_io_input (vlib_main_t * vm, if (PREDICT_TRUE(n_buffers > 1)) { struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD); } - - b0 = (vlib_buffer_t *)(mb+1); - + + b0 = vlib_buffer_from_rte_mbuf(mb); + /* check whether EFD is looking for packets to discard */ if (PREDICT_FALSE(efd_discard_burst)) { @@ -1711,7 +1711,7 @@ dpdk_io_input (vlib_main_t * vm, if (PREDICT_FALSE(mb->nb_segs > 1)) { struct rte_mbuf *pfmb = mb->next; - vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1); + vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb); CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE); b_chain = b0; @@ -1762,7 +1762,7 @@ dpdk_io_input (vlib_main_t * vm, { ASSERT(mb_seg != 0); - b_seg = (vlib_buffer_t *)(mb_seg+1); + b_seg = vlib_buffer_from_rte_mbuf(mb_seg); 
vlib_buffer_init_for_free_list (b_seg, fl); b_seg->clone_count = 0; diff --git a/vnet/vnet/devices/ssvm/ssvm_eth.h b/vnet/vnet/devices/ssvm/ssvm_eth.h index 70d895b9..8f1f8896 100644 --- a/vnet/vnet/devices/ssvm/ssvm_eth.h +++ b/vnet/vnet/devices/ssvm/ssvm_eth.h @@ -35,7 +35,7 @@ extern vnet_device_class_t ssvm_eth_device_class; extern vlib_node_registration_t ssvm_eth_input_node; #define SSVM_BUFFER_SIZE \ - (VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES + VLIB_BUFFER_PRE_DATA_SIZE) + (VLIB_BUFFER_DATA_SIZE + VLIB_BUFFER_PRE_DATA_SIZE) #define SSVM_PACKET_TYPE 1 typedef struct { diff --git a/vnet/vnet/dpdk_replication.h b/vnet/vnet/dpdk_replication.h index b25558f9..6259c449 100644 --- a/vnet/vnet/dpdk_replication.h +++ b/vnet/vnet/dpdk_replication.h @@ -49,7 +49,7 @@ vlib_dpdk_clone_buffer (vlib_main_t * vm, vlib_buffer_t * b) return 0; src_buf = b; - rv = dst_buf = (vlib_buffer_t *)(rte_mbufs[0] + 1); + rv = dst_buf = vlib_buffer_from_rte_mbuf(rte_mbufs[0]); vlib_buffer_init_for_free_list (dst_buf, fl); copy_src = b->data + src_buf->current_data; copy_dst = dst_buf->data + src_buf->current_data; @@ -75,7 +75,7 @@ vlib_dpdk_clone_buffer (vlib_main_t * vm, vlib_buffer_t * b) if (i < new_buffers_needed - 1) { src_buf = vlib_get_buffer (vm, src_buf->next_buffer); - dst_buf = (vlib_buffer_t *)(rte_mbufs[i+1] + 1); + dst_buf = vlib_buffer_from_rte_mbuf(rte_mbufs[i+1]); vlib_buffer_init_for_free_list (dst_buf, fl); copy_src = src_buf->data; copy_dst = dst_buf->data; @@ -87,7 +87,7 @@ vlib_dpdk_clone_buffer (vlib_main_t * vm, vlib_buffer_t * b) if (rte_mempool_get_bulk (rmp, (void **)rte_mbufs, 1) < 0) return 0; - rv = (vlib_buffer_t *)(rte_mbufs[0] + 1); + rv = vlib_buffer_from_rte_mbuf(rte_mbufs[0]); vlib_buffer_init_for_free_list (rv, fl); memcpy(rv->data + b->current_data, b->data + b->current_data, diff --git a/vnet/vnet/ip/ip.h b/vnet/vnet/ip/ip.h index 76a2552f..de46ad38 100644 --- a/vnet/vnet/ip/ip.h +++ b/vnet/vnet/ip/ip.h @@ -162,7 +162,7 @@ ip_incremental_checksum_buffer (vlib_main_t * vm, vlib_buffer_t * first_buffer, #if DPDK > 0 { u32 n_bytes_left = n_bytes_to_checksum; - struct rte_mbuf * mb = ((struct rte_mbuf *)first_buffer)-1; + struct rte_mbuf * mb = rte_mbuf_from_vlib_buffer(first_buffer); u8 nb_segs = mb->nb_segs; ASSERT(mb->data_len >= first_buffer_offset); void * h; diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c index 1d0e21e5..a478bab5 100644 --- a/vnet/vnet/ip/ip6_forward.c +++ b/vnet/vnet/ip/ip6_forward.c @@ -1445,7 +1445,7 @@ u16 ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0, ip6 #if DPDK > 0 if (p0) { - struct rte_mbuf *mb = ((struct rte_mbuf *)p0)-1; + struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer(p0); u8 nb_segs = mb->nb_segs; n_this_buffer = (p0->current_length > headers_size ? 
diff --git a/vnet/vnet/pg/input.c b/vnet/vnet/pg/input.c index 4ec61ca7..38402c2e 100644 --- a/vnet/vnet/pg/input.c +++ b/vnet/vnet/pg/input.c @@ -61,7 +61,7 @@ pg_set_mbuf_metadata (pg_main_t * pg, u32 * buffers, u32 n_alloc) for (i = 0; i < n_alloc; i++) { b = vlib_get_buffer (vm, buffers[i]); - mb = ((struct rte_mbuf *)b) - 1; + mb = rte_mbuf_from_vlib_buffer(b); delta = vlib_buffer_length_in_chain (vm, b) - (i16) mb->pkt_len; new_data_len = (u16)((i16) mb->data_len + delta); @@ -1473,9 +1473,9 @@ pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers) ({ vlib_buffer_t * b; struct rte_mbuf *mb; - + b = vlib_get_buffer(vm, bi0[0]); - mb = (struct rte_mbuf *)b - 1; + mb = rte_mbuf_from_vlib_buffer(b); ASSERT(rte_mbuf_refcnt_read(mb) == 1); })); } diff --git a/vnet/vnet/unix/tapcli.c b/vnet/vnet/unix/tapcli.c index 5b0ac937..7776b9ee 100644 --- a/vnet/vnet/unix/tapcli.c +++ b/vnet/vnet/unix/tapcli.c @@ -220,7 +220,7 @@ static uword tapcli_rx_iface(vlib_main_t * vm, tapcli_interface_t * ti) { tapcli_main_t * tm = &tapcli_main; - const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES; + const uword buffer_size = VLIB_BUFFER_DATA_SIZE; u32 n_trace = vlib_get_trace_count (vm, node); u8 set_trace = 0; @@ -434,7 +434,7 @@ static clib_error_t * tapcli_config (vlib_main_t * vm, unformat_input_t * input) { tapcli_main_t *tm = &tapcli_main; - const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES; + const uword buffer_size = VLIB_BUFFER_DATA_SIZE; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { diff --git a/vnet/vnet/unix/tuntap.c b/vnet/vnet/unix/tuntap.c index 77c60fd6..7ea0b703 100644 --- a/vnet/vnet/unix/tuntap.c +++ b/vnet/vnet/unix/tuntap.c @@ -200,12 +200,11 @@ tuntap_rx (vlib_main_t * vm, tuntap_main_t * tm = &tuntap_main; vlib_buffer_t * b; u32 bi; + const uword buffer_size = VLIB_BUFFER_DATA_SIZE; #if DPDK == 0 - const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES; u32 free_list_index = VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX; #else dpdk_main_t * dm = &dpdk_main; - const uword buffer_size = MBUF_SIZE; u32 free_list_index = dm->vlib_buffer_free_list_index; #endif @@ -262,7 +261,7 @@ tuntap_rx (vlib_main_t * vm, #endif b = vlib_get_buffer (vm, tm->rx_buffers[i_rx]); #if DPDK == 1 - mb = (((struct rte_mbuf *)b)-1); + mb = rte_mbuf_from_vlib_buffer(b); #endif b->flags = 0; b->current_data = 0; @@ -445,12 +444,7 @@ tuntap_config (vlib_main_t * vm, unformat_input_t * input) u8 * name; int flags = IFF_TUN | IFF_NO_PI; int is_enabled = 0, is_ether = 0, have_normal_interface = 0; -#if DPDK == 0 - const uword buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES; -#else - const uword buffer_size = MBUF_SIZE; -#endif - + const uword buffer_size = VLIB_BUFFER_DATA_SIZE; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { -- cgit 1.2.3-korg From dab123ba697ecd9c2310b524d37509649fbcb173 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 29 Mar 2016 16:43:58 +0200 Subject: Add abbility to specify dpdk tarball download base url Change-Id: Ieceb0b1f4ab69609961d8946f627dd9311963da5 Signed-off-by: Damjan Marion --- dpdk/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index dabbf6ad..e6e2b5f8 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -24,8 +24,9 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 2.2.0 +DPDK_BASE_URL ?= http://dpdk.org/browse/dpdk/snapshot DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz -DPDK_TAR_URL := 
http://dpdk.org/browse/dpdk/snapshot/$(DPDK_TARBALL) +DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) -- cgit 1.2.3-korg From 0df78dda5ae1e1e082c2e0ea282ba5494c3d4b31 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 29 Mar 2016 22:37:02 +0200 Subject: Add DPDK 16.04-rc2 support Can be used by specifying DPDK_VERSION=16.04-rc2 in the make command line Change-Id: I657b44d7ca22f1ef57756e7703088020fab12bc6 Signed-off-by: Damjan Marion --- Makefile | 3 ++- dpdk/Makefile | 5 ++++- vnet/vnet/devices/virtio/vhost-user.h | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index a9b4d9ab..31222caf 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ RPM_DEPENDS = redhat-lsb glibc-static java-1.8.0-openjdk-devel RPM_DEPENDS += openssl-devel https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm apr-devel EPEL_DEPENDS = libconfuse-devel ganglia-devel -ifneq ("$(wildcard $(STARTUP_DIR)/startup.conf),"") +ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),) STARTUP_CONF ?= $(STARTUP_DIR)/startup.conf endif @@ -76,6 +76,7 @@ help: @echo " STARTUP_DIR = $(STARTUP_DIR)" @echo " GDB = $(GDB)" @echo " PLATFORM = $(PLATFORM)" + @echo " DPDK_VERSION = $(DPDK_VERSION)" $(BR)/.bootstrap.ok: ifeq ("$(shell lsb_release -si)", "Ubuntu") diff --git a/dpdk/Makefile b/dpdk/Makefile index e6e2b5f8..316efc01 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -29,6 +29,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 +DPDK_16.04-rc2_TARBALL_MD5_CKSUM := a642985d2694d8cf8fdfdf0723be3406 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) DPDK_TARGET := x86_64-native-linuxapp-gcc JOBS := $(shell grep processor /proc/cpuinfo | wc -l) @@ -82,7 +83,7 @@ all: build $(B)/custom-config: $(B)/.patch.ok Makefile @echo --- generating custom config from $(DPDK_SOURCE)/config/common_linuxapp --- - @cp $(DPDK_SOURCE)/config/common_linuxapp $@ + @cpp -undef -ffreestanding -x assembler-with-cpp $(DPDK_SOURCE)/config/common_linuxapp $@ $(call set,RTE_MACHINE,$(DPDK_MACHINE)) $(call set,RTE_ARCH,"x86_64") $(call set,RTE_ARCH_X86_64,y) @@ -145,11 +146,13 @@ $(B)/.extract.ok: $(B)/.download.ok extract: $(B)/.extract.ok $(B)/.patch.ok: $(B)/.extract.ok +ifneq ($(wildcard $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch),) @echo --- patching --- for f in $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch ; do \ echo Applying patch: $$(basename $$f) ; \ patch -p1 -d $(DPDK_SOURCE) < $$f ; \ done +endif @touch $@ .PHONY: patch diff --git a/vnet/vnet/devices/virtio/vhost-user.h b/vnet/vnet/devices/virtio/vhost-user.h index be011165..d6d55731 100644 --- a/vnet/vnet/devices/virtio/vhost-user.h +++ b/vnet/vnet/devices/virtio/vhost-user.h @@ -78,11 +78,11 @@ typedef struct vhost_user_memory { vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS]; } vhost_user_memory_t; -typedef struct vhost_vring_state { +typedef struct { unsigned int index, num; } vhost_vring_state_t; -typedef struct vhost_vring_addr { +typedef struct { unsigned int index, flags; u64 desc_user_addr, used_user_addr, avail_user_addr, log_guest_addr; } vhost_vring_addr_t; -- cgit 1.2.3-korg From 5ba5f83b797c2629b89cabbefea1c8bb4dccbbcd Mon Sep 17 00:00:00 2001 
From: Damjan Marion Date: Mon, 11 Apr 2016 12:51:00 +0200 Subject: Bump DPDK 16.04 to RC4 Change-Id: Ia9affeee54e860b6039d7ee0f411bd022b1dc76c Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- vnet/vnet/devices/dpdk/init.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 316efc01..e1aafcbb 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -29,7 +29,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 -DPDK_16.04-rc2_TARBALL_MD5_CKSUM := a642985d2694d8cf8fdfdf0723be3406 +DPDK_16.04-rc4_TARBALL_MD5_CKSUM := 0de766a629999881e1c6e0de25d92bc0 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) DPDK_TARGET := x86_64-native-linuxapp-gcc JOBS := $(shell grep processor /proc/cpuinfo | wc -l) diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index 822b05d4..17938697 100644 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -1512,6 +1512,35 @@ void dpdk_update_link_state (dpdk_device_t * xd, f64 now) break; } } +#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0) + if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed)) + { + hw_flags_chg = 1; + switch (xd->link.link_speed) + { + case ETH_SPEED_NUM_10M: + hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10M; + break; + case ETH_SPEED_NUM_100M: + hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M; + break; + case ETH_SPEED_NUM_1G: + hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G; + break; + case ETH_SPEED_NUM_10G: + hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G; + break; + case ETH_SPEED_NUM_40G: + hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G; + break; + case 0: + break; + default: + clib_warning("unknown link speed %d", xd->link.link_speed); + break; + } + } +#else if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed)) { hw_flags_chg = 1; @@ -1539,6 +1568,7 @@ void dpdk_update_link_state (dpdk_device_t * xd, f64 now) break; } } +#endif if (hw_flags_chg) { if (LINK_STATE_ELOGS) -- cgit 1.2.3-korg From c42552d4e0f358c5c473b64aafe3057a4914f71a Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 12 Apr 2016 05:10:25 +0200 Subject: Add support for DPDK 16.04 release, rebase some of 2.2.0 patches Change-Id: I08292ba39dc6012c2edbcdaed0b02a8ebe07aec4 Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- ...1-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch | 75 +++++++++++++++++++ ...a-bit-longer-for-autonegotiation-to-leave.patch | 25 +++++++ ...Cleanup-virtio-pmd-debug-log-output-reset.patch | 65 +++++++++++++++++ ...f-rearrange-rte_mbuf-metadata-to-suit-vpp.patch | 83 ++++++++++++++++++++++ ...low-applications-to-override-rte_delay_us.patch | 43 +++++++++++ ...mporarily-disable-unthrottled-log-message.patch | 26 +++++++ 7 files changed, 318 insertions(+), 1 deletion(-) create mode 100644 dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch create mode 100644 dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch create mode 100644 dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch create mode 100644 dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch create mode 100644 dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch create mode 100644 
dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index e1aafcbb..04ab94d3 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -29,7 +29,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 -DPDK_16.04-rc4_TARBALL_MD5_CKSUM := 0de766a629999881e1c6e0de25d92bc0 +DPDK_16.04_TARBALL_MD5_CKSUM := 0728d506d7f56eb64233e824fa3c098a DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) DPDK_TARGET := x86_64-native-linuxapp-gcc JOBS := $(shell grep processor /proc/cpuinfo | wc -l) diff --git a/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch new file mode 100644 index 00000000..044a4179 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch @@ -0,0 +1,75 @@ +From c085c9f9a7332c63d002169581edc89ef99fdbb1 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:21:21 +0100 +Subject: [PATCH 1/6] e1000: Set VLAN Rx Offload tag correctly + +--- + drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++ + lib/librte_ether/rte_ether.h | 3 +++ + 2 files changed, 33 insertions(+) + +diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c +index 4a987e3..d6a4ce5 100644 +--- a/drivers/net/e1000/igb_rxtx.c ++++ b/drivers/net/e1000/igb_rxtx.c +@@ -904,6 +904,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ { ++ /* ++ * Check packet for VLAN ethernet types and set ++ * RX Offload flag PKT_RX_VLAN_PKT accordingly. ++ */ ++ struct ether_hdr *eth_hdr = ++ rte_pktmbuf_mtod(rxm, struct ether_hdr *); ++ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); ++ ++ if ((eth_type == ETHER_TYPE_VLAN) || ++ (eth_type == ETHER_TYPE_VLAN_AD) || ++ (eth_type == ETHER_TYPE_VLAN_9100) || ++ (eth_type == ETHER_TYPE_VLAN_9200)) ++ pkt_flags |= PKT_RX_VLAN_PKT; ++ } + rxm->ol_flags = pkt_flags; + rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. + lo_dword.hs_rss.pkt_info); +@@ -1140,6 +1155,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); ++ { ++ /* ++ * Check packet for VLAN ethernet types and set ++ * RX Offload flag PKT_RX_VLAN_PKT accordingly. ++ */ ++ struct ether_hdr *eth_hdr = ++ rte_pktmbuf_mtod(rxm, struct ether_hdr *); ++ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); ++ ++ if ((eth_type == ETHER_TYPE_VLAN) || ++ (eth_type == ETHER_TYPE_VLAN_AD) || ++ (eth_type == ETHER_TYPE_VLAN_9100) || ++ (eth_type == ETHER_TYPE_VLAN_9200)) ++ pkt_flags |= PKT_RX_VLAN_PKT; ++ } + first_seg->ol_flags = pkt_flags; + first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. 
+ lower.lo_dword.hs_rss.pkt_info); +diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h +index 1d62d8e..341121a 100644 +--- a/lib/librte_ether/rte_ether.h ++++ b/lib/librte_ether/rte_ether.h +@@ -332,6 +332,9 @@ struct vxlan_hdr { + #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ + #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ + #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ ++#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */ ++#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */ ++#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */ + + #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) + /**< VXLAN tunnel header length. */ +-- +2.7.4 + diff --git a/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch new file mode 100644 index 00000000..4b385467 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch @@ -0,0 +1,25 @@ +From 8e1be5044b5ee29c8cb3921051fb6d0722b60651 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:22:11 +0100 +Subject: [PATCH 2/6] ixgbe: Wait a bit longer for autonegotiation to leave + +--- + drivers/net/ixgbe/base/ixgbe_82599.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c +index 154c1f1..817a8b5 100644 +--- a/drivers/net/ixgbe/base/ixgbe_82599.c ++++ b/drivers/net/ixgbe/base/ixgbe_82599.c +@@ -2470,7 +2470,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + /* Wait for AN to leave state 0 */ +- for (i = 0; i < 10; i++) { ++ for (i = 0; i < 50; i++) { + msec_delay(4); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) +-- +2.7.4 + diff --git a/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch new file mode 100644 index 00000000..8c53d0f1 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch @@ -0,0 +1,65 @@ +From 1ee05e874eaa3f03ee7b5fbd6a32dff7304bd620 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 03:29:22 +0100 +Subject: [PATCH 3/6] virtio: Cleanup virtio pmd debug log output, reset + +--- + drivers/net/virtio/virtio_ethdev.c | 5 ----- + drivers/net/virtio/virtio_rxtx.c | 4 +++- + 2 files changed, 3 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 63a368a..ed4e757 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -1405,18 +1405,13 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet + link.link_speed = SPEED_10G; + + if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { +- PMD_INIT_LOG(DEBUG, "Get link status from hw"); + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, status), + &status, sizeof(status)); + if ((status & VIRTIO_NET_S_LINK_UP) == 0) { + link.link_status = ETH_LINK_DOWN; +- PMD_INIT_LOG(DEBUG, "Port %d is down", +- dev->data->port_id); + } else { + link.link_status = ETH_LINK_UP; +- PMD_INIT_LOG(DEBUG, "Port %d is 
up", +- dev->data->port_id); + } + } else { + link.link_status = ETH_LINK_UP; +diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c +index ef21d8e..7fe14ad 100644 +--- a/drivers/net/virtio/virtio_rxtx.c ++++ b/drivers/net/virtio/virtio_rxtx.c +@@ -643,6 +643,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rxm->next = NULL; + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); ++ rxm->ol_flags = 0; + + if (hw->vlan_strip) + rte_vlan_strip(rxm); +@@ -760,6 +761,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, + rxm->vlan_tci = 0; + rxm->pkt_len = (uint32_t)(len[0] - hdr_size); + rxm->data_len = (uint16_t)(len[0] - hdr_size); ++ rxm->ol_flags = 0; + + rxm->port = rxvq->port_id; + rx_pkts[nb_rx] = rxm; +@@ -863,7 +865,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + if (unlikely(nb_pkts < 1)) + return nb_pkts; + +- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); ++ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts); + nb_used = VIRTQUEUE_NUSED(txvq); + + virtio_rmb(); +-- +2.7.4 + diff --git a/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch new file mode 100644 index 00000000..78d0c637 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch @@ -0,0 +1,83 @@ +From eed80f56477e26a5711ea3749d1881797b3c82a5 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Wed, 16 Dec 2015 04:25:23 +0100 +Subject: [PATCH 4/6] mbuf: rearrange rte_mbuf metadata to suit vpp + +--- + .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++-- + lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++-------- + 2 files changed, 15 insertions(+), 10 deletions(-) + +diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h +index 7e5e598..fdbeb4a 100644 +--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h ++++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h +@@ -118,11 +118,12 @@ struct rte_kni_mbuf { + char pad2[4]; + uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ ++ char pad3[8]; ++ void *next; + + /* fields on second cache line */ +- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE))); ++ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE))); + void *pool; +- void *next; + }; + + /* +diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h +index 75a227d..ca4d0fb 100644 +--- a/lib/librte_mbuf/rte_mbuf.h ++++ b/lib/librte_mbuf/rte_mbuf.h +@@ -731,6 +731,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes + /** + * The generic rte_mbuf, containing a packet mbuf. + */ ++/* ++ * offload in the second cache line, next in the first. Better for vpp ++ * at least as of right now. ++ * If you change this structure, you must change the user-mode ++ * version in rte_mbuf.h ++ */ + struct rte_mbuf { + MARKER cacheline0; + +@@ -783,6 +789,12 @@ struct rte_mbuf { + uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */ ++ uint32_t seqn; /**< Sequence number. 
See also rte_reorder_insert() */ ++ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ ++ struct rte_mbuf *next; /**< Next segment of scattered packet. */ ++ ++ /* second cache line - fields only used in slow path or on TX */ ++ MARKER cacheline1 __rte_cache_min_aligned; + + union { + uint32_t rss; /**< RSS hash result if RSS enabled */ +@@ -806,20 +818,12 @@ struct rte_mbuf { + uint32_t usr; /**< User defined tags. See rte_distributor_process() */ + } hash; /**< hash information */ + +- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ +- +- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ +- +- /* second cache line - fields only used in slow path or on TX */ +- MARKER cacheline1 __rte_cache_min_aligned; +- + union { + void *userdata; /**< Can be used for external metadata */ + uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ + }; + + struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ +- struct rte_mbuf *next; /**< Next segment of scattered packet. */ + + /* fields to support TX offloads */ + union { +-- +2.7.4 + diff --git a/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch new file mode 100644 index 00000000..8a32f600 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch @@ -0,0 +1,43 @@ +From 3432c140c9c51e671a4d58bb428d5852426add1f Mon Sep 17 00:00:00 2001 +From: "Todd Foggoa (tfoggoa)" +Date: Wed, 3 Feb 2016 08:35:27 -0800 +Subject: [PATCH 5/6] Allow applications to override rte_delay_us() + +Some applications may wish to define their own implentation of +usec delay other than the existing blocking one. The default +behavior remains unchanged. + +Signed-off-by: Todd Foggoa (tfoggoa) +--- + lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c +index c4227cd..cc26b91 100644 +--- a/lib/librte_eal/common/eal_common_timer.c ++++ b/lib/librte_eal/common/eal_common_timer.c +@@ -47,9 +47,21 @@ + /* The frequency of the RDTSC timer resolution */ + static uint64_t eal_tsc_resolution_hz; + ++/* Allow an override of the rte_delay_us function */ ++int rte_delay_us_override (unsigned us) __attribute__((weak)); ++ ++int ++rte_delay_us_override(__attribute__((unused)) unsigned us) ++{ ++ return 0; ++} ++ + void + rte_delay_us(unsigned us) + { ++ if (rte_delay_us_override(us)) ++ return; ++ + const uint64_t start = rte_get_timer_cycles(); + const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6; + while ((rte_get_timer_cycles() - start) < ticks) +-- +2.7.4 + diff --git a/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch new file mode 100644 index 00000000..22415226 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch @@ -0,0 +1,26 @@ +From 454e25ed57c17ec18ee76ead4a75f9abdf579608 Mon Sep 17 00:00:00 2001 +From: Dave Barach +Date: Tue, 9 Feb 2016 10:22:39 -0500 +Subject: [PATCH 6/6] Temporarily disable unthrottled log message. 
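/* Regarding patch 0005 above: rte_delay_us() now consults the weak hook
 * rte_delay_us_override() and returns early when the hook reports the delay
 * as handled. An application overrides the hook simply by supplying a strong
 * definition at link time. A minimal sketch follows; usleep() is only a
 * placeholder, and a real application would more likely yield to its own
 * scheduler instead of sleeping: */
#include <unistd.h>

int
rte_delay_us_override (unsigned us)
{
  usleep (us);   /* wait without spinning on the TSC */
  return 1;      /* non-zero => rte_delay_us() skips its busy-wait loop */
}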
+ +Signed-off-by: Dave Barach +--- + lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c +index 06b26a9..8d918a4 100644 +--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c ++++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c +@@ -711,6 +711,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) + if (errno == EINTR || errno == EWOULDBLOCK) + continue; + ++ /* $$$ disable to avoid filling /var/log */ ++ if (0) + RTE_LOG(ERR, EAL, "Error reading from file " + "descriptor %d: %s\n", + events[n].data.fd, +-- +2.7.4 + -- cgit 1.2.3-korg From 106f0eda8d42de69492d97db0cd8e046d9093786 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 26 Apr 2016 10:44:28 +0200 Subject: Gernerate dpdk config out of target specific files DPDK 16.04 introduced new config parameter RTE_ARCH_X86 which was missing in the dpdk/Makefile That caused issues with virtio devices on older kernels (observed on 3.13.0). This patch changes the source of generated config so it will also include all defines in config/defconfig_TARGET file. Change-Id: Idc6ccbef1d01da84235281ea1078c081846446de Signed-off-by: Damjan Marion --- dpdk/Makefile | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 04ab94d3..38eef929 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -82,14 +82,9 @@ endef all: build $(B)/custom-config: $(B)/.patch.ok Makefile - @echo --- generating custom config from $(DPDK_SOURCE)/config/common_linuxapp --- - @cpp -undef -ffreestanding -x assembler-with-cpp $(DPDK_SOURCE)/config/common_linuxapp $@ + @echo --- generating custom config from $(DPDK_SOURCE)/config/defconfig_$(DPDK_TARGET) --- + @cpp -undef -ffreestanding -x assembler-with-cpp $(DPDK_SOURCE)/config/defconfig_$(DPDK_TARGET) $@ $(call set,RTE_MACHINE,$(DPDK_MACHINE)) - $(call set,RTE_ARCH,"x86_64") - $(call set,RTE_ARCH_X86_64,y) - $(call set,RTE_ARCH_64,y) - $(call set,RTE_TOOLCHAIN_GCC,y) - $(call set,RTE_TOOLCHAIN,"gcc") @# modify options $(call set,RTE_MAX_LCORE,256) $(call set,RTE_PKTMBUF_HEADROOM,$(DPDK_PKTMBUF_HEADROOM)) -- cgit 1.2.3-korg From fed985c898a0c6383a95bcf57db38bbd14041326 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Mon, 25 Apr 2016 08:51:21 -0400 Subject: Build vpp w/ dpdk-16.04 by default Change-Id: I82cdd82213517c51064eb439c44068d8977e5619 Signed-off-by: Dave Barach --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 38eef929..398fc60f 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -23,7 +23,7 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 2.2.0 +DPDK_VERSION ?= 16.04 DPDK_BASE_URL ?= http://dpdk.org/browse/dpdk/snapshot DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -- cgit 1.2.3-korg From e39a7b8347b1055cca6e9cfadcfeca23f8236eb9 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 26 Apr 2016 14:54:57 +0200 Subject: Fix compile errors reported by clang For using clang as a compiler it is enough to specify CC=clang in the make command line Change-Id: I06f1c1d418b68768f8119de5bdc8748c51f90c02 Signed-off-by: Damjan Marion --- dpdk/Makefile | 13 +++++++-- .../0010-Fix-O0-clang-build.patch | 32 ++++++++++++++++++++++ vlib/vlib/pci/pci_config.h | 2 +- vlib/vlib/unix/pci.c | 2 +- vnet/vnet/ip/adj_alloc.c | 2 +- 
vnet/vnet/lisp-cp/packets.c | 2 +- vnet/vnet/lisp-gpe/decap.c | 2 +- vnet/vnet/lisp-gpe/lisp_gpe.c | 2 +- vnet/vnet/vcgn/cnat_global.c | 2 +- vnet/vnet/vcgn/cnat_ipv4_udp_outside_input.c | 4 +-- vnet/vnet/vcgn/cnat_logging.c | 2 +- vnet/vnet/vcgn/cnat_ports.c | 2 +- vnet/vnet/vcgn/cnat_syslog.c | 2 +- vpp/vnet/main.c | 2 +- 14 files changed, 55 insertions(+), 16 deletions(-) create mode 100644 dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 398fc60f..fadbb268 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -31,7 +31,14 @@ DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 DPDK_16.04_TARBALL_MD5_CKSUM := 0728d506d7f56eb64233e824fa3c098a DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) -DPDK_TARGET := x86_64-native-linuxapp-gcc + +ifneq (,$(findstring clang,$(CC))) +DPDK_CC=clang +else +DPDK_CC=gcc +endif + +DPDK_TARGET := x86_64-native-linuxapp-$(DPDK_CC) JOBS := $(shell grep processor /proc/cpuinfo | wc -l) # compiler/linker custom arguments @@ -40,9 +47,9 @@ DPDK_CPU_LDFLAGS := DPDK_EXTRA_LDFLAGS := -g ifeq ($(DPDK_DEBUG),n) -DPDK_EXTRA_CFLAGS := -g +DPDK_EXTRA_CFLAGS := -g else -DPDK_EXTRA_CFLAGS := -g -O0 +DPDK_EXTRA_CFLAGS := -g -O0 endif # translate gcc march values to DPDK arch diff --git a/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch b/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch new file mode 100644 index 00000000..2ce0e7c8 --- /dev/null +++ b/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch @@ -0,0 +1,32 @@ +From 2b82c248638bba6e98ecf388c6e0b1f5f0b44028 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Tue, 26 Apr 2016 12:36:52 +0200 +Subject: [PATCH] Fix -O0 clang build + +Signed-off-by: Damjan Marion +--- + lib/librte_eal/common/include/arch/x86/rte_rtm.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h +index d935641..30c1969 100644 +--- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h ++++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h +@@ -50,11 +50,14 @@ void rte_xend(void) + asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory"); + } + ++#define rte_xabort(x) asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (x) : "memory") ++#if 0 + static __attribute__((__always_inline__)) inline + void rte_xabort(const unsigned int status) + { + asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); + } ++#endif + + static __attribute__((__always_inline__)) inline + int rte_xtest(void) +-- +2.7.4 + diff --git a/vlib/vlib/pci/pci_config.h b/vlib/vlib/pci/pci_config.h index 38215d82..9cada51c 100644 --- a/vlib/vlib/pci/pci_config.h +++ b/vlib/vlib/pci/pci_config.h @@ -417,7 +417,7 @@ pci_config_find_capability (pci_config_type0_regs_t * t, int cap_type) while (ttl-- && next_offset >= 0x40) { c = (void *) t + (next_offset &~ 3); - if (c->type == 0xff) + if ((u8) c->type == 0xff) break; if (c->type == cap_type) return c; diff --git a/vlib/vlib/unix/pci.c b/vlib/vlib/unix/pci.c index 75241f3f..b28b542b 100644 --- a/vlib/vlib/unix/pci.c +++ b/vlib/vlib/unix/pci.c @@ -420,7 +420,7 @@ u8 * format_os_pci_handle (u8 * s, va_list * va) l->bus_address.slot, l->bus_address.function); } -static inline pci_device_registration_t * +pci_device_registration_t * __attribute__((unused)) pci_device_next_registered (pci_device_registration_t * r) { uword i; diff --git a/vnet/vnet/ip/adj_alloc.c b/vnet/vnet/ip/adj_alloc.c 
index 56104207..3ae7a199 100644 --- a/vnet/vnet/ip/adj_alloc.c +++ b/vnet/vnet/ip/adj_alloc.c @@ -51,7 +51,7 @@ aa_alloc (ip_adjacency_t * adjs, ip_adjacency_t **blockp, u32 n) aa_header_t * ah = aa_header (adjs); ip_adjacency_t * adj_block; u32 freelist_length; - int need_barrier_sync; + int need_barrier_sync = 0; ASSERT(os_get_cpu_number() == 0); ASSERT (clib_mem_is_heap_object (_vec_find(ah))); diff --git a/vnet/vnet/lisp-cp/packets.c b/vnet/vnet/lisp-cp/packets.c index aa246a9d..28471b9a 100644 --- a/vnet/vnet/lisp-cp/packets.c +++ b/vnet/vnet/lisp-cp/packets.c @@ -225,7 +225,7 @@ pkt_push_udp_and_ip (vlib_main_t * vm, vlib_buffer_t *b, u16 sp, u16 dp, udpsum = udp_checksum (uh, clib_net_to_host_u16 (uh->length), ih, ip_addr_version(sip)); - if (udpsum == -1) + if (udpsum == (u16) ~0) { clib_warning("Failed UDP checksum! Discarding"); return 0; diff --git a/vnet/vnet/lisp-gpe/decap.c b/vnet/vnet/lisp-gpe/decap.c index 5d88462b..d0848ab5 100644 --- a/vnet/vnet/lisp-gpe/decap.c +++ b/vnet/vnet/lisp-gpe/decap.c @@ -104,7 +104,7 @@ lisp_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi0, bi1; vlib_buffer_t * b0, * b1; ip4_udp_lisp_gpe_header_t * iul0, * iul1; - u32 next0, next1, error0, error1; + u32 next0, next1, error0 = 0, error1 = 0; uword * si0, * si1; next0 = next1 = LISP_GPE_INPUT_NEXT_IP4_INPUT; diff --git a/vnet/vnet/lisp-gpe/lisp_gpe.c b/vnet/vnet/lisp-gpe/lisp_gpe.c index fd1e1a46..a1d4b6fb 100644 --- a/vnet/vnet/lisp-gpe/lisp_gpe.c +++ b/vnet/vnet/lisp-gpe/lisp_gpe.c @@ -721,7 +721,7 @@ add_del_negative_fwd_entry (lisp_gpe_main_t * lgm, /* TODO insert tunnel that always sends map-request */ case DROP: /* for drop fwd entries, just add route, no need to add encap tunnel */ - adj.lookup_next_index = LGPE_IP4_LOOKUP_NEXT_DROP; + adj.lookup_next_index = (u16) LGPE_IP4_LOOKUP_NEXT_DROP; /* add/delete route for prefix */ return ip4_sd_fib_add_del_route (lgm, dpref, spref, a->table_id, &adj, diff --git a/vnet/vnet/vcgn/cnat_global.c b/vnet/vnet/vcgn/cnat_global.c index 9ab89eea..71770834 100644 --- a/vnet/vnet/vcgn/cnat_global.c +++ b/vnet/vnet/vcgn/cnat_global.c @@ -57,7 +57,7 @@ dslite_table_entry_t *dslite_table_db_ptr; */ /* TOBE_PORTED: Following is in cnat_util.c */ -always_inline +always_inline __attribute__((unused)) void ipv4_decr_ttl_n_calc_csum(ipv4_header *ipv4) { u32 checksum; diff --git a/vnet/vnet/vcgn/cnat_ipv4_udp_outside_input.c b/vnet/vnet/vcgn/cnat_ipv4_udp_outside_input.c index f6ffd4ef..5a24a111 100644 --- a/vnet/vnet/vcgn/cnat_ipv4_udp_outside_input.c +++ b/vnet/vnet/vcgn/cnat_ipv4_udp_outside_input.c @@ -137,8 +137,8 @@ is_static_dest_nat_enabled(u16 vrf) return CNAT_NO_CONFIG; }*/ -static inline void swap_ip_dst(ipv4_header *ip, - cnat_main_db_entry_t *db, u16 vrf) +static inline void __attribute__((unused)) +swap_ip_dst(ipv4_header *ip, cnat_main_db_entry_t *db, u16 vrf) { CNAT_UPDATE_L3_CHECKSUM_DECLARE diff --git a/vnet/vnet/vcgn/cnat_logging.c b/vnet/vnet/vcgn/cnat_logging.c index d404c590..50805d11 100644 --- a/vnet/vnet/vcgn/cnat_logging.c +++ b/vnet/vnet/vcgn/cnat_logging.c @@ -2668,7 +2668,7 @@ void cnat_nfv9_ds_lite_log_session_delete( */ -static inline +static inline __attribute__((unused)) void handle_vrfid_name_mapping(void) { cnat_nfv9_logging_info_t *nfv9_logging_info = NULL; diff --git a/vnet/vnet/vcgn/cnat_ports.c b/vnet/vnet/vcgn/cnat_ports.c index 4437865a..943fb3ed 100644 --- a/vnet/vnet/vcgn/cnat_ports.c +++ b/vnet/vnet/vcgn/cnat_ports.c @@ -623,7 +623,7 @@ cnat_dynamic_port_alloc_v2 ( for (i = 0; i < max_trys_to_find_port; 
i++) { /* start_bit is only a u16.. so it can rollover and become zero */ - if (PREDICT_FALSE((start_bit >= BITS_PER_INST) || + if (PREDICT_FALSE( /* (start_bit >= BITS_PER_INST) || FIXME u16 cannot be >= 65536 */ (start_bit < static_port_range))) { start_bit = static_port_range; #ifndef NO_BULK_LOGGING diff --git a/vnet/vnet/vcgn/cnat_syslog.c b/vnet/vnet/vcgn/cnat_syslog.c index 65fde4ca..91758f14 100644 --- a/vnet/vnet/vcgn/cnat_syslog.c +++ b/vnet/vnet/vcgn/cnat_syslog.c @@ -361,7 +361,7 @@ byte_to_ascii_decimal_unaligned( * in to the space provided and * returns the number of bytes copied */ -inline static int +inline static int __attribute__((unused)) copy_ipv4_addr(unsigned char *ptr, u32 ipv4) { unsigned char *temp = ptr; diff --git a/vpp/vnet/main.c b/vpp/vnet/main.c index 4659440f..2e08aa53 100644 --- a/vpp/vnet/main.c +++ b/vpp/vnet/main.c @@ -67,7 +67,7 @@ int main (int argc, char * argv[]) u32 size; void vlib_set_get_handoff_structure_cb (void *cb); -#if __x86_64__ +#if __x86_64__ && !defined(__clang__) __builtin_cpu_init (); const char * msg = "ERROR: This binary requires CPU with %s extensions.\n"; #define _(a,b) \ -- cgit 1.2.3-korg From 59611303fb70a739544c280177f315bf1508d761 Mon Sep 17 00:00:00 2001 From: Ed Warnicke Date: Thu, 5 May 2016 15:19:49 -0500 Subject: Switch to using nexus.fd.io for dpdk tarball Change-Id: I9751cbb9137627491ee4bd03e0318429327c0bd8 Signed-off-by: Ed Warnicke --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index fadbb268..5d05e6af 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -24,7 +24,7 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.04 -DPDK_BASE_URL ?= http://dpdk.org/browse/dpdk/snapshot +DPDK_BASE_URL ?= https://nexus.fd.io/content/repositories/thirdparty/ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 -- cgit 1.2.3-korg From 737547efc348c5c60d75a14e04d4aad8499fd3b6 Mon Sep 17 00:00:00 2001 From: Christophe Fontaine Date: Wed, 11 May 2016 08:40:33 +0000 Subject: dpdk/Makefile - Allow dpdk target to be set according to the platform Allows DPDK parameters to be overriden from the platform definition. 
$(PLATFORM)_dpdk_arch = "armv7a" $(PLATFORM)_dpdk_target = "arm-armv7a-linuxapp-gcc" $(PLATFORM)_dpdk_make_extra_args = "CONFIG_RTE_EAL_IGB_UIO=y" Change-Id: I8c0f233942744cb82ca3ed2d65e33acee845cb4e Signed-off-by: Christophe Fontaine --- build-data/packages/dpdk.mk | 10 ++++++++++ build-data/platforms/arm32.mk | 14 +++++++++++--- dpdk/Makefile | 11 ++++++++++- 3 files changed, 31 insertions(+), 4 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index c7f04346..2bfc4b49 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -17,6 +17,16 @@ DPDK_MAKE_ARGS = -C $(call find_source_fn,$(PACKAGE_SOURCE)) \ DPDK_DEBUG=$(DPDK_DEBUG) +TARGET=$(strip $($(PLATFORM)_dpdk_target)) +ifneq ($(TARGET),) +DPDK_MAKE_ARGS += DPDK_TARGET=$(TARGET) +endif + +DPDK_MAKE_EXTRA_ARGS = $(strip $($(PLATFORM)_dpdk_make_extra_args)) +ifneq ($(DPDK_MAKE_EXTRA_ARGS),) +DPDK_MAKE_ARGS += DPDK_MAKE_EXTRA_ARGS="$(DPDK_MAKE_EXTRA_ARGS)" +endif + dpdk_configure = echo dpdk_make_args = $(DPDK_MAKE_ARGS) config diff --git a/build-data/platforms/arm32.mk b/build-data/platforms/arm32.mk index fecc5bbc..5e1adabf 100644 --- a/build-data/platforms/arm32.mk +++ b/build-data/platforms/arm32.mk @@ -15,16 +15,24 @@ arm32_arch = native arm32_native_tools = vppapigen -arm32_uses_dpdk = no +arm32_uses_dpdk = yes arm32_uses_openssl = no arm32_root_packages = vpp vlib vlib-api vnet svm vpp-api-test \ vpp-japi gmod vlib_configure_args_arm32 = --with-pre-data=128 +vnet_configure_args_arm32 = --with-dpdk --without-vcgn --without-ipsec --without-ipv6sr +vpp_configure_args_arm32 = --with-dpdk --without-vcgn --without-ipsec --without-ipv6sr + +arm32_dpdk_arch = "armv7a" +arm32_dpdk_target = "arm-armv7a-linuxapp-gcc" +arm32_dpdk_make_extra_args = "CPU_CFLAGS='-mfloat-abi=hard' \ + CONFIG_RTE_EAL_IGB_UIO=y \ + CONFIG_RTE_LIBRTE_E1000_PMD=y \ + CONFIG_RTE_MAX_LCORE=4 \ + CONFIG_RTE_MAX_NUMA_NODES=1" -vnet_configure_args_arm32 = --without-vcgn --without-ipsec --without-ipv6sr -vpp_configure_args_arm32 = --without-vcgn --without-ipsec --without-ipv6sr arm32_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -DVLIB_MAX_CPUS=4 -march=armv7-a \ -fstack-protector-all -fPIC -Werror diff --git a/dpdk/Makefile b/dpdk/Makefile index 5d05e6af..165058d9 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -38,7 +38,11 @@ else DPDK_CC=gcc endif + +ifeq (,$(DPDK_TARGET)) DPDK_TARGET := x86_64-native-linuxapp-$(DPDK_CC) +endif + JOBS := $(shell grep processor /proc/cpuinfo | wc -l) # compiler/linker custom arguments @@ -63,6 +67,10 @@ else ifeq ($(DPDK_MARCH),core-avx-i) DPDK_MACHINE:=ivb # Ivy Bridge else ifeq ($(DPDK_MARCH),core-avx2) DPDK_MACHINE:=hsw # Haswell +else ifeq ($(DPDK_MARCH),armv7a) +DPDK_MACHINE:=armv7a # ARMv7 +else ifeq ($(DPDK_MARCH),armv8a) +DPDK_MACHINE:=armv8a # ARMv8 else $(error Unknown DPDK_MARCH) endif @@ -75,7 +83,8 @@ DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ EXTRA_CFLAGS="$(DPDK_EXTRA_CFLAGS)" \ EXTRA_LDFLAGS="$(DPDK_EXTRA_LDFLAGS)" \ CPU_CFLAGS="$(DPDK_CPU_CFLAGS)" \ - CPU_LDFLAGS="$(DPDK_CPU_LDFLAGS)" + CPU_LDFLAGS="$(DPDK_CPU_LDFLAGS)" \ + $(DPDK_MAKE_EXTRA_ARGS) DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") -- cgit 1.2.3-korg From 1c80e831b728ab378949714d5059a0b5b1822a0a Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 11 May 2016 23:07:18 +0200 Subject: Add support for multiple microarchitectures in single binary * compiler -march= parameter is changed from native to corei7 so code 
is always genereted with instructions which are available on the Nehalem microarchitecture (up to SSE4.2) * compiler -mtune= parameter is added so code is optimized for corei7-avx which equals to Sandy Bridge microarchitecture * set of macros is added which allows run-time detection of available cpu instructions (e.g. clib_cpu_supports_avx()) * set of macros is added which allows us to clone graph node funcitons where cloned function is optmized for different microarchitecture Those macros are using following attributes: __attribute__((flatten)) __attribute__((target("arch=core-avx2))) I.e. If applied to foo_node_fn() macro will generate cloned functions foo_node_fn_avx2() and foo_node_fn_avx512() (future) It will also generate function void * foo_node_fn_multiarch_select() which detects available instruction set and returns pointer to the best matching function clone. Change-Id: I2dce0ac92a5ede95fcb56f47f3d1f3c4c040bac0 Signed-off-by: Damjan Marion --- build-data/packages/dpdk.mk | 6 +++ build-data/platforms/vpp.mk | 7 +++- build-data/platforms/vpp_lite.mk | 6 ++- build-root/Makefile | 5 +++ dpdk/Makefile | 3 +- vlib/vlib/node.h | 27 ++++++++++++ vnet/vnet/classify/ip_classify.c | 4 ++ vnet/vnet/cop/ip4_whitelist.c | 2 + vnet/vnet/cop/ip6_whitelist.c | 2 + vnet/vnet/cop/node1.c | 2 + vnet/vnet/devices/af_packet/device.c | 3 ++ vnet/vnet/devices/af_packet/node.c | 4 +- vnet/vnet/devices/dpdk/cli.c | 8 ++++ vnet/vnet/devices/dpdk/device.c | 3 ++ vnet/vnet/devices/dpdk/dpdk.h | 5 ++- vnet/vnet/devices/dpdk/init.c | 8 ++-- vnet/vnet/devices/dpdk/node.c | 50 +++++++++++++++++++--- vnet/vnet/devices/netmap/device.c | 3 ++ vnet/vnet/devices/netmap/node.c | 3 +- vnet/vnet/devices/ssvm/node.c | 2 + vnet/vnet/devices/ssvm/ssvm_eth.c | 3 ++ vnet/vnet/devices/virtio/vhost-user.c | 5 +++ vnet/vnet/ethernet/node.c | 6 +++ vnet/vnet/gre/gre.c | 3 ++ vnet/vnet/gre/node.c | 2 + vnet/vnet/interface.h | 26 ++++++++++++ vnet/vnet/interface_output.c | 6 +++ vnet/vnet/ip/ip4_forward.c | 18 ++++++++ vnet/vnet/ip/ip4_hop_by_hop.c | 2 + vnet/vnet/ip/ip4_input.c | 4 ++ vnet/vnet/ip/ip4_source_check.c | 6 +++ vnet/vnet/ip/ip6_forward.c | 16 +++++++ vnet/vnet/ip/ip6_hop_by_hop.c | 5 +++ vnet/vnet/ip/ip6_input.c | 2 + vnet/vnet/ip/ip_input_acl.c | 4 ++ vnet/vnet/ip/udp_local.c | 4 ++ vnet/vnet/ipsec/esp_decrypt.c | 2 + vnet/vnet/ipsec/esp_encrypt.c | 2 + vnet/vnet/ipsec/ipsec_if_in.c | 5 ++- vnet/vnet/ipsec/ipsec_if_out.c | 2 + vnet/vnet/ipsec/ipsec_input.c | 7 ++++ vnet/vnet/ipsec/ipsec_output.c | 2 + vnet/vnet/l2/l2_classify.c | 2 + vnet/vnet/l2/l2_efp_filter.c | 2 + vnet/vnet/l2/l2_flood.c | 2 + vnet/vnet/l2/l2_fwd.c | 2 + vnet/vnet/l2/l2_input.c | 2 + vnet/vnet/l2/l2_input_acl.c | 2 + vnet/vnet/l2/l2_input_vtr.c | 2 + vnet/vnet/l2/l2_learn.c | 1 + vnet/vnet/l2/l2_output.c | 2 + vnet/vnet/l2/l2_output_acl.c | 2 + vnet/vnet/l2/l2_patch.c | 2 + vnet/vnet/l2/l2_rw.c | 2 + vnet/vnet/l2/l2_xcrw.c | 2 + vnet/vnet/l2tp/decap.c | 2 + vnet/vnet/l2tp/encap.c | 2 + vnet/vnet/lawful-intercept/node.c | 2 + vnet/vnet/mpls-gre/interface.c | 7 ++++ vnet/vnet/mpls-gre/node.c | 4 ++ vnet/vnet/mpls-gre/policy_encap.c | 2 + vnet/vnet/policer/node_funcs.c | 3 ++ vnet/vnet/sr/sr.c | 6 +++ vnet/vnet/sr/sr_replicate.c | 2 + vnet/vnet/vxlan/decap.c | 5 +++ vnet/vnet/vxlan/encap.c | 3 ++ vpp/app/l2t_ip6.c | 2 + vpp/app/l2t_l2.c | 2 + vpp/app/version.c | 1 + vpp/vnet/main.c | 19 +++++---- vppinfra/vppinfra/cpu.c | 23 ++++++++--- vppinfra/vppinfra/cpu.h | 78 ++++++++++++++++++++++++++++++++++- 72 files changed, 439 insertions(+), 34 
deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index 2bfc4b49..0a4c1610 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -4,6 +4,11 @@ ifeq ($(DPDK_MARCH),) DPDK_MARCH="native" endif +DPDK_TUNE = $(strip $($(PLATFORM)_mtune)) +ifeq ($(DPDK_TUNE),) + DPDK_MARCH="generic" +endif + ifneq (,$(findstring debug,$(TAG))) DPDK_DEBUG=y else @@ -14,6 +19,7 @@ DPDK_MAKE_ARGS = -C $(call find_source_fn,$(PACKAGE_SOURCE)) \ DPDK_BUILD_DIR=$(PACKAGE_BUILD_DIR) \ DPDK_INSTALL_DIR=$(PACKAGE_INSTALL_DIR) \ DPDK_MARCH=$(DPDK_MARCH) \ + DPDK_TUNE=$(DPDK_TUNE) \ DPDK_DEBUG=$(DPDK_DEBUG) diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index c381be19..ec0d874d 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -13,6 +13,9 @@ # vector packet processor vpp_arch = native +vpp_march = corei7 # Nehalem Instruction set +vpp_mtune = corei7-avx # Optimize for Sandy Bridge +vpp_dpdk_arch = corei7 vpp_native_tools = vppapigen vpp_uses_dpdk = yes @@ -40,9 +43,9 @@ vpp_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ vpp_debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ -fstack-protector-all -fPIC -Werror -vpp_TAG_CFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) \ +vpp_TAG_CFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) -mtune=$(MTUNE) \ -fstack-protector -fPIC -Werror -vpp_TAG_LDFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) \ +vpp_TAG_LDFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) -mtune=$(MTUNE) \ -fstack-protector -fPIC -Werror vpp_gcov_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -march=$(MARCH) \ diff --git a/build-data/platforms/vpp_lite.mk b/build-data/platforms/vpp_lite.mk index 4183700e..d35d2347 100644 --- a/build-data/platforms/vpp_lite.mk +++ b/build-data/platforms/vpp_lite.mk @@ -13,6 +13,8 @@ # vector packet processor vpp_lite_arch = native +vpp_lite_march = corei7 # Nehalem Instruction set +vpp_lite_mtune = corei7-avx # Optimize for Sandy Bridge vpp_lite_native_tools = vppapigen vpp_lite_uses_dpdk = no @@ -30,7 +32,7 @@ vpp_lite_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARC vpp_lite_debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ -fstack-protector-all -fPIC -Werror -vpp_lite_TAG_CFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) \ +vpp_lite_TAG_CFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) -mtune=$(MTUNE) \ -fstack-protector -fPIC -Werror -vpp_lite_TAG_LDFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) \ +vpp_lite_TAG_LDFLAGS = -g -O2 -DFORTIFY_SOURCE=2 -march=$(MARCH) -mtune=$(MTUNE) \ -fstack-protector -fPIC -Werror diff --git a/build-root/Makefile b/build-root/Makefile index a60cbf0a..97fb43d4 100644 --- a/build-root/Makefile +++ b/build-root/Makefile @@ -220,6 +220,11 @@ else endif export MARCH +MTUNE = $(strip $($(PLATFORM)_mtune)) +ifeq ($(MTUNE),) + MTUNE = generic +endif + ###################################################################### # Generic build stuff ###################################################################### diff --git a/dpdk/Makefile b/dpdk/Makefile index 165058d9..b7e1e097 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -19,6 +19,7 @@ DPDK_INSTALL_DIR ?= $(CURDIR)/_install DPDK_PKTMBUF_HEADROOM ?= 128 DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_MARCH ?= native +DPDK_TUNE ?= generic DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) @@ -51,7 +52,7 @@ DPDK_CPU_LDFLAGS := DPDK_EXTRA_LDFLAGS := -g ifeq ($(DPDK_DEBUG),n) 
-DPDK_EXTRA_CFLAGS := -g +DPDK_EXTRA_CFLAGS := -g -mtune=$(DPDK_TUNE) else DPDK_EXTRA_CFLAGS := -g -O0 endif diff --git a/vlib/vlib/node.h b/vlib/vlib/node.h index 2caede6e..9b33a0a3 100644 --- a/vlib/vlib/node.h +++ b/vlib/vlib/node.h @@ -40,6 +40,7 @@ #ifndef included_vlib_node_h #define included_vlib_node_h +#include #include #include #include /* for vlib_trace_filter_t */ @@ -149,6 +150,32 @@ static void __vlib_add_node_registration_##x (void) \ } \ __VA_ARGS__ vlib_node_registration_t x +#if CLIB_DEBUG > 0 +#define VLIB_NODE_FUNCTION_CLONE_TEMPLATE(arch, fn) +#define VLIB_NODE_FUNCTION_MULTIARCH_CLONE(fn) +#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn) +#else +#define VLIB_NODE_FUNCTION_CLONE_TEMPLATE(arch, fn, tgt) \ + uword \ + __attribute__ ((flatten)) \ + __attribute__ ((target (tgt))) \ + CLIB_CPU_OPTIMIZED \ + fn ## _ ## arch ( struct vlib_main_t * vm, \ + struct vlib_node_runtime_t * node, \ + struct vlib_frame_t * frame) \ + { return fn (vm, node, frame); } + +#define VLIB_NODE_FUNCTION_MULTIARCH_CLONE(fn) \ + foreach_march_variant(VLIB_NODE_FUNCTION_CLONE_TEMPLATE, fn) + +#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn) \ + VLIB_NODE_FUNCTION_MULTIARCH_CLONE(fn) \ + CLIB_MULTIARCH_SELECT_FN(fn, static inline) \ + static void __attribute__((__constructor__)) \ + __vlib_node_function_multiarch_select_##node (void) \ + { node.function = fn ## _multiarch_select(); } +#endif + always_inline vlib_node_registration_t * vlib_node_next_registered (vlib_node_registration_t * c) { diff --git a/vnet/vnet/classify/ip_classify.c b/vnet/vnet/classify/ip_classify.c index 8152f6ce..75e80ad6 100644 --- a/vnet/vnet/classify/ip_classify.c +++ b/vnet/vnet/classify/ip_classify.c @@ -329,6 +329,8 @@ VLIB_REGISTER_NODE (ip4_classify_node) = { .next_nodes = IP4_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_classify_node, ip4_classify) + static uword ip6_classify (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -350,6 +352,8 @@ VLIB_REGISTER_NODE (ip6_classify_node) = { .next_nodes = IP6_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_classify_node, ip6_classify) + static clib_error_t * ip_classify_init (vlib_main_t * vm) { diff --git a/vnet/vnet/cop/ip4_whitelist.c b/vnet/vnet/cop/ip4_whitelist.c index 6178e891..5578558c 100644 --- a/vnet/vnet/cop/ip4_whitelist.c +++ b/vnet/vnet/cop/ip4_whitelist.c @@ -348,6 +348,8 @@ VLIB_REGISTER_NODE (ip4_cop_whitelist_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_cop_whitelist_node, ip4_cop_whitelist_node_fn) + static clib_error_t * ip4_whitelist_init (vlib_main_t * vm) { diff --git a/vnet/vnet/cop/ip6_whitelist.c b/vnet/vnet/cop/ip6_whitelist.c index dc6a1ee5..4a8f33fb 100644 --- a/vnet/vnet/cop/ip6_whitelist.c +++ b/vnet/vnet/cop/ip6_whitelist.c @@ -286,6 +286,8 @@ VLIB_REGISTER_NODE (ip6_cop_whitelist_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_cop_whitelist_node, ip6_cop_whitelist_node_fn) + static clib_error_t * ip6_whitelist_init (vlib_main_t * vm) { diff --git a/vnet/vnet/cop/node1.c b/vnet/vnet/cop/node1.c index 3ee7006a..b448b531 100644 --- a/vnet/vnet/cop/node1.c +++ b/vnet/vnet/cop/node1.c @@ -282,6 +282,8 @@ VLIB_REGISTER_NODE (cop_input_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (cop_input_node, cop_input_node_fn) + #define foreach_cop_stub \ _(default-cop-whitelist, default_cop_whitelist) diff --git a/vnet/vnet/devices/af_packet/device.c b/vnet/vnet/devices/af_packet/device.c index f0e91487..0671d9e2 100644 --- a/vnet/vnet/devices/af_packet/device.c +++ b/vnet/vnet/devices/af_packet/device.c @@ -204,3 +204,6 @@ 
VNET_DEVICE_CLASS (af_packet_device_class) = { .subif_add_del_function = af_packet_subif_add_del_function, .no_flatten_output_chains = 1, }; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (af_packet_device_class, + af_packet_interface_tx) diff --git a/vnet/vnet/devices/af_packet/node.c b/vnet/vnet/devices/af_packet/node.c index b622a221..0c608ea4 100644 --- a/vnet/vnet/devices/af_packet/node.c +++ b/vnet/vnet/devices/af_packet/node.c @@ -267,7 +267,6 @@ af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, return n_rx_packets; } - VLIB_REGISTER_NODE (af_packet_input_node) = { .function = af_packet_input_fn, .name = "af-packet-input", @@ -283,3 +282,6 @@ VLIB_REGISTER_NODE (af_packet_input_node) = { [AF_PACKET_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input", }, }; + +VLIB_NODE_FUNCTION_MULTIARCH (af_packet_input_node, af_packet_input_fn) + diff --git a/vnet/vnet/devices/dpdk/cli.c b/vnet/vnet/devices/dpdk/cli.c index 9ec19867..9063cad8 100644 --- a/vnet/vnet/devices/dpdk/cli.c +++ b/vnet/vnet/devices/dpdk/cli.c @@ -759,6 +759,7 @@ set_efd (vlib_main_t *vm, unformat_input_t *input, dpdk_main_t * dm = &dpdk_main; vlib_thread_main_t * tm = vlib_get_thread_main(); clib_error_t * error = NULL; + vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, dpdk_input_node.index); if (unformat(input, "enable")) { if (unformat(input, "dpdk")) { @@ -844,6 +845,13 @@ set_efd (vlib_main_t *vm, unformat_input_t *input, format_unformat_error, input); } + if (dm->efd.enabled) + rt->function = dpdk_input_efd_multiarch_select(); + else if (dm->use_rss) + rt->function = dpdk_input_rss_multiarch_select(); + else + rt->function = dpdk_input_multiarch_select(); + return error; } diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c index ab85dfb4..a38c8d19 100644 --- a/vnet/vnet/devices/dpdk/device.c +++ b/vnet/vnet/devices/dpdk/device.c @@ -1184,6 +1184,9 @@ VNET_DEVICE_CLASS (dpdk_device_class) = { .name_renumber = dpdk_device_renumber, }; +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, + dpdk_interface_tx) + void dpdk_set_flowcontrol_callback (vlib_main_t *vm, dpdk_flowcontrol_callback_t callback) { diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h index 019d83f7..525cd8d1 100644 --- a/vnet/vnet/devices/dpdk/dpdk.h +++ b/vnet/vnet/devices/dpdk/dpdk.h @@ -584,8 +584,9 @@ dpdk_pmd_t dpdk_get_pmd_type (vnet_hw_interface_t *hi); i8 dpdk_get_cpu_socket (vnet_hw_interface_t *hi); -uword -dpdk_input_rss (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f); +void * dpdk_input_multiarch_select(); +void * dpdk_input_rss_multiarch_select(); +void * dpdk_input_efd_multiarch_select(); clib_error_t* dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats* dest); diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index 63fa4c07..8ee59ff8 100644 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -804,6 +804,7 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) clib_error_t * error = 0; dpdk_main_t * dm = &dpdk_main; vlib_thread_main_t * tm = vlib_get_thread_main(); + vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, dpdk_input_node.index); u8 * s, * tmp = 0; u8 * pci_dev_id = 0; u8 * rte_cmd = 0, * ethname = 0; @@ -1251,10 +1252,9 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) } if (dm->use_rss) - { - vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, dpdk_input_node.index); - rt->function = dpdk_input_rss; - } + rt->function = dpdk_input_rss_multiarch_select(); + else + 
rt->function = dpdk_input_multiarch_select(); done: return error; } diff --git a/vnet/vnet/devices/dpdk/node.c b/vnet/vnet/devices/dpdk/node.c index a7590a5c..ca94511b 100644 --- a/vnet/vnet/devices/dpdk/node.c +++ b/vnet/vnet/devices/dpdk/node.c @@ -251,6 +251,8 @@ VLIB_REGISTER_NODE (handoff_dispatch_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn) + clib_error_t *handoff_dispatch_init (vlib_main_t *vm) { handoff_dispatch_main_t * mp = &handoff_dispatch_main; @@ -488,7 +490,8 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, dpdk_device_t * xd, vlib_node_runtime_t * node, u32 cpu_index, - u16 queue_id) + u16 queue_id, + int use_efd) { u32 n_buffers; u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT; @@ -510,7 +513,7 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, if (n_buffers == 0) { /* check if EFD (dpdk) is enabled */ - if (PREDICT_FALSE(dm->efd.enabled)) + if (PREDICT_FALSE(use_efd && dm->efd.enabled)) { /* reset a few stats */ xd->efd_agent.last_poll_time = 0; @@ -546,7 +549,7 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm, /* Check for congestion if EFD (Early-Fast-Discard) is enabled * in any mode (e.g. dpdk, monitor, or drop_all) */ - if (PREDICT_FALSE(dm->efd.enabled)) + if (PREDICT_FALSE(use_efd && dm->efd.enabled)) { /* update EFD counters */ dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled); @@ -793,7 +796,7 @@ dpdk_input (vlib_main_t * vm, { xd = vec_elt_at_index(dm->devices, dq->device); ASSERT(dq->queue_id == 0); - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0); + n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0, 0); } VIRL_SPEED_LIMIT() @@ -818,7 +821,7 @@ dpdk_input_rss (vlib_main_t * vm, vec_foreach (dq, dm->devices_by_cpu[cpu_index]) { xd = vec_elt_at_index(dm->devices, dq->device); - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id); + n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 0); } VIRL_SPEED_LIMIT() @@ -826,6 +829,32 @@ dpdk_input_rss (vlib_main_t * vm, return n_rx_packets; } +uword +dpdk_input_efd (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * f) +{ + dpdk_main_t * dm = &dpdk_main; + dpdk_device_t * xd; + uword n_rx_packets = 0; + dpdk_device_and_queue_t * dq; + u32 cpu_index = os_get_cpu_number(); + + /* + * Poll all devices on this cpu for input/interrupts. + */ + vec_foreach (dq, dm->devices_by_cpu[cpu_index]) + { + xd = vec_elt_at_index(dm->devices, dq->device); + n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 1); + } + + VIRL_SPEED_LIMIT() + + return n_rx_packets; +} + + VLIB_REGISTER_NODE (dpdk_input_node) = { .function = dpdk_input, .type = VLIB_NODE_TYPE_INPUT, @@ -850,6 +879,17 @@ VLIB_REGISTER_NODE (dpdk_input_node) = { }, }; + +/* handle dpdk_input_rss alternative function */ +VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input) +VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_rss) +VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_efd) + +/* this macro defines dpdk_input_rss_multiarch_select() */ +CLIB_MULTIARCH_SELECT_FN(dpdk_input); +CLIB_MULTIARCH_SELECT_FN(dpdk_input_rss); +CLIB_MULTIARCH_SELECT_FN(dpdk_input_efd); + /* * Override the next nodes for the dpdk input nodes. * Must be invoked prior to VLIB_INIT_FUNCTION calls. 
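To make the vlib/node.h macro machinery above concrete, here is a rough sketch of what VLIB_NODE_FUNCTION_MULTIARCH (foo_node, foo_node_fn) boils down to for a hypothetical node foo_node. The exact shape of foreach_march_variant, CLIB_MULTIARCH_SELECT_FN and the clib_cpu_supports_*() helpers lives in vppinfra/cpu.h (listed in the diffstat but not shown in full here), so those parts are assumptions; vlib headers are assumed to be included.

/* per-microarchitecture clone: same body, compiled with AVX2 code generation */
uword __attribute__ ((flatten, target ("arch=core-avx2")))
foo_node_fn_avx2 (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return foo_node_fn (vm, node, frame);
}

/* run-time selection of the best clone for the host CPU (assumed helper) */
static inline void *
foo_node_fn_multiarch_select (void)
{
  if (clib_cpu_supports_avx2 ())
    return foo_node_fn_avx2;
  return foo_node_fn;          /* baseline corei7 / SSE4.2 build */
}

/* constructor runs before main() and patches the node's function pointer */
static void __attribute__ ((constructor))
__vlib_node_function_multiarch_select_foo_node (void)
{
  foo_node.function = foo_node_fn_multiarch_select ();
}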
diff --git a/vnet/vnet/devices/netmap/device.c b/vnet/vnet/devices/netmap/device.c index f04e0672..a966ffef 100644 --- a/vnet/vnet/devices/netmap/device.c +++ b/vnet/vnet/devices/netmap/device.c @@ -232,3 +232,6 @@ VNET_DEVICE_CLASS (netmap_device_class) = { .subif_add_del_function = netmap_subif_add_del_function, .no_flatten_output_chains = 1, }; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH(netmap_device_class, + netmap_interface_tx) diff --git a/vnet/vnet/devices/netmap/node.c b/vnet/vnet/devices/netmap/node.c index 3986c7e4..f4c39e62 100644 --- a/vnet/vnet/devices/netmap/node.c +++ b/vnet/vnet/devices/netmap/node.c @@ -270,7 +270,6 @@ netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, return n_rx_packets; } - VLIB_REGISTER_NODE (netmap_input_node) = { .function = netmap_input_fn, .name = "netmap-input", @@ -287,3 +286,5 @@ VLIB_REGISTER_NODE (netmap_input_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (netmap_input_node, netmap_input_fn) + diff --git a/vnet/vnet/devices/ssvm/node.c b/vnet/vnet/devices/ssvm/node.c index 84625696..57b5fd22 100644 --- a/vnet/vnet/devices/ssvm/node.c +++ b/vnet/vnet/devices/ssvm/node.c @@ -334,3 +334,5 @@ VLIB_REGISTER_NODE (ssvm_eth_input_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ssvm_eth_input_node, ssvm_eth_input_node_fn) + diff --git a/vnet/vnet/devices/ssvm/ssvm_eth.c b/vnet/vnet/devices/ssvm/ssvm_eth.c index 49f2d5f8..7c65e212 100644 --- a/vnet/vnet/devices/ssvm/ssvm_eth.c +++ b/vnet/vnet/devices/ssvm/ssvm_eth.c @@ -474,3 +474,6 @@ VNET_DEVICE_CLASS (ssvm_eth_device_class) = { .rx_redirect_to_node = ssvm_eth_set_interface_next_node, .no_flatten_output_chains = 1, }; + +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (ssvm_eth_device_class, + ssvm_eth_interface_tx) diff --git a/vnet/vnet/devices/virtio/vhost-user.c b/vnet/vnet/devices/virtio/vhost-user.c index 5902c42b..ef4993f9 100644 --- a/vnet/vnet/devices/virtio/vhost-user.c +++ b/vnet/vnet/devices/virtio/vhost-user.c @@ -1144,6 +1144,8 @@ VLIB_REGISTER_NODE (vhost_user_input_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (vhost_user_input_node, vhost_user_input) + static uword vhost_user_intfc_tx (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -1373,6 +1375,9 @@ VNET_DEVICE_CLASS (vhost_user_dev_class,static) = { .no_flatten_output_chains = 1, }; +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (vhost_user_dev_class, + vhost_user_intfc_tx) + static uword vhost_user_process (vlib_main_t * vm, vlib_node_runtime_t * rt, diff --git a/vnet/vnet/ethernet/node.c b/vnet/vnet/ethernet/node.c index e8902fd7..226a66e9 100644 --- a/vnet/vnet/ethernet/node.c +++ b/vnet/vnet/ethernet/node.c @@ -945,6 +945,8 @@ VLIB_REGISTER_NODE (ethernet_input_node) = { .unformat_buffer = unformat_ethernet_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_node, ethernet_input) + VLIB_REGISTER_NODE (ethernet_input_type_node,static) = { .function = ethernet_input_type, .name = "ethernet-input-type", @@ -959,6 +961,8 @@ VLIB_REGISTER_NODE (ethernet_input_type_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_type_node, ethernet_input_type) + VLIB_REGISTER_NODE (ethernet_input_not_l2_node,static) = { .function = ethernet_input_not_l2, .name = "ethernet-input-not-l2", @@ -973,6 +977,8 @@ VLIB_REGISTER_NODE (ethernet_input_not_l2_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ethernet_input_not_l2_node, ethernet_input_not_l2) + void ethernet_set_rx_redirect (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 enable) diff --git a/vnet/vnet/gre/gre.c b/vnet/vnet/gre/gre.c index 075bd6fd..6d375159 100644 --- 
a/vnet/vnet/gre/gre.c +++ b/vnet/vnet/gre/gre.c @@ -454,6 +454,9 @@ VNET_DEVICE_CLASS (gre_device_class) = { #endif }; +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class, + gre_interface_tx) + VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = { .name = "GRE", diff --git a/vnet/vnet/gre/node.c b/vnet/vnet/gre/node.c index a91f7e9e..e33b2b05 100644 --- a/vnet/vnet/gre/node.c +++ b/vnet/vnet/gre/node.c @@ -434,6 +434,8 @@ VLIB_REGISTER_NODE (gre_input_node) = { .unformat_buffer = unformat_gre_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input) + void gre_register_input_protocol (vlib_main_t * vm, gre_protocol_t protocol, diff --git a/vnet/vnet/interface.h b/vnet/vnet/interface.h index 2829a0cc..30dcf276 100644 --- a/vnet/vnet/interface.h +++ b/vnet/vnet/interface.h @@ -163,6 +163,32 @@ static void __vnet_add_device_class_registration_##x (void) \ } \ __VA_ARGS__ vnet_device_class_t x +#define VLIB_DEVICE_TX_FUNCTION_CLONE_TEMPLATE(arch, fn, tgt) \ + uword \ + __attribute__ ((flatten)) \ + __attribute__ ((target (tgt))) \ + CLIB_CPU_OPTIMIZED \ + fn ## _ ## arch ( vlib_main_t * vm, \ + vlib_node_runtime_t * node, \ + vlib_frame_t * frame) \ + { return fn (vm, node, frame); } + +#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH_CLONE(fn) \ + foreach_march_variant(VLIB_DEVICE_TX_FUNCTION_CLONE_TEMPLATE, fn) + +#if CLIB_DEBUG > 0 +#define VLIB_MULTIARCH_CLONE_AND_SELECT_FN(fn,...) +#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH(dev, fn) +#else +#define VLIB_DEVICE_TX_FUNCTION_MULTIARCH(dev, fn) \ + VLIB_DEVICE_TX_FUNCTION_MULTIARCH_CLONE(fn) \ + CLIB_MULTIARCH_SELECT_FN(fn, static inline) \ + static void __attribute__((__constructor__)) \ + __vlib_device_tx_function_multiarch_select_##dev (void) \ + { dev.tx_function = fn ## _multiarch_select(); } +#endif + + /* Layer-2 (e.g. Ethernet) interface class. */ typedef struct _vnet_hw_interface_class { /* Index into main vector. */ diff --git a/vnet/vnet/interface_output.c b/vnet/vnet/interface_output.c index 04c1b7f5..9f9fb707 100644 --- a/vnet/vnet/interface_output.c +++ b/vnet/vnet/interface_output.c @@ -1140,6 +1140,8 @@ VLIB_REGISTER_NODE (drop_buffers,static) = { .validate_frame = validate_error_frame, }; +VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop) + VLIB_REGISTER_NODE (punt_buffers,static) = { .function = process_punt, .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH @@ -1150,12 +1152,16 @@ VLIB_REGISTER_NODE (punt_buffers,static) = { .validate_frame = validate_error_frame, }; +VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt) + VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node,static) = { .function = vnet_per_buffer_interface_output, .name = "interface-output", .vector_size = sizeof (u32), }; +VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node, vnet_per_buffer_interface_output) + clib_error_t * vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm, u32 hw_if_index, diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c index a84b83bc..ae2f9eea 100644 --- a/vnet/vnet/ip/ip4_forward.c +++ b/vnet/vnet/ip/ip4_forward.c @@ -1354,6 +1354,8 @@ VLIB_REGISTER_NODE (ip4_lookup_node) = { .next_nodes = IP4_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_node, ip4_lookup) + static uword ip4_indirect (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -1373,6 +1375,8 @@ VLIB_REGISTER_NODE (ip4_indirect_node) = { .next_nodes = IP4_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_indirect_node, ip4_indirect) + /* Global IP4 main. 
*/ ip4_main_t ip4_main; @@ -1612,6 +1616,8 @@ VLIB_REGISTER_NODE (ip4_drop_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_drop_node, ip4_drop) + VLIB_REGISTER_NODE (ip4_punt_node,static) = { .function = ip4_punt, .name = "ip4-punt", @@ -1625,6 +1631,8 @@ VLIB_REGISTER_NODE (ip4_punt_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_punt_node, ip4_punt) + VLIB_REGISTER_NODE (ip4_miss_node,static) = { .function = ip4_miss, .name = "ip4-miss", @@ -1638,6 +1646,8 @@ VLIB_REGISTER_NODE (ip4_miss_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_miss_node, ip4_miss) + /* Compute TCP/UDP/ICMP4 checksum in software. */ u16 ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0, @@ -2076,6 +2086,8 @@ VLIB_REGISTER_NODE (ip4_local_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local) + void ip4_register_protocol (u32 protocol, u32 node_index) { vlib_main_t * vm = vlib_get_main(); @@ -2743,6 +2755,8 @@ VLIB_REGISTER_NODE (ip4_rewrite_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_node, ip4_rewrite_transit) + VLIB_REGISTER_NODE (ip4_rewrite_local_node,static) = { .function = ip4_rewrite_local, .name = "ip4-rewrite-local", @@ -2759,6 +2773,8 @@ VLIB_REGISTER_NODE (ip4_rewrite_local_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_local_node, ip4_rewrite_local) + static clib_error_t * add_del_interface_table (vlib_main_t * vm, unformat_input_t * input, @@ -3032,6 +3048,8 @@ VLIB_REGISTER_NODE (ip4_lookup_multicast_node,static) = { .next_nodes = IP4_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_multicast_node, ip4_lookup_multicast) + VLIB_REGISTER_NODE (ip4_multicast_node,static) = { .function = ip4_drop, .name = "ip4-multicast", diff --git a/vnet/vnet/ip/ip4_hop_by_hop.c b/vnet/vnet/ip/ip4_hop_by_hop.c index ae46040a..177feb74 100644 --- a/vnet/vnet/ip/ip4_hop_by_hop.c +++ b/vnet/vnet/ip/ip4_hop_by_hop.c @@ -233,6 +233,8 @@ VLIB_REGISTER_NODE (ip4_hop_by_hop_node) = { .next_nodes = IP4_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_hop_by_hop_node, ip4_hop_by_hop_node_fn) + VLIB_REGISTER_NODE (ip4_add_hop_by_hop_node) = { .function = ip4_hop_by_hop_node_fn, .name = "ip4-add-hop-by-hop", diff --git a/vnet/vnet/ip/ip4_input.c b/vnet/vnet/ip/ip4_input.c index f31df0f6..60634250 100644 --- a/vnet/vnet/ip/ip4_input.c +++ b/vnet/vnet/ip/ip4_input.c @@ -371,6 +371,8 @@ VLIB_REGISTER_NODE (ip4_input_node) = { .format_trace = format_ip4_input_trace, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_node, ip4_input) + VLIB_REGISTER_NODE (ip4_input_no_checksum_node,static) = { .function = ip4_input_no_checksum, .name = "ip4-input-no-checksum", @@ -389,6 +391,8 @@ VLIB_REGISTER_NODE (ip4_input_no_checksum_node,static) = { .format_trace = format_ip4_input_trace, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_no_checksum_node, ip4_input_no_checksum) + static clib_error_t * ip4_init (vlib_main_t * vm) { clib_error_t * error; diff --git a/vnet/vnet/ip/ip4_source_check.c b/vnet/vnet/ip/ip4_source_check.c index 47e22f23..11e6678e 100644 --- a/vnet/vnet/ip/ip4_source_check.c +++ b/vnet/vnet/ip/ip4_source_check.c @@ -297,6 +297,9 @@ VLIB_REGISTER_NODE (ip4_check_source_reachable_via_any) = { .format_trace = format_ip4_source_check_trace, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_check_source_reachable_via_any, + ip4_source_check_reachable_via_any) + VLIB_REGISTER_NODE (ip4_check_source_reachable_via_rx) = { .function = ip4_source_check_reachable_via_rx, .name = "ip4-source-check-via-rx", @@ -311,6 +314,9 @@ 
VLIB_REGISTER_NODE (ip4_check_source_reachable_via_rx) = { .format_trace = format_ip4_source_check_trace, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_check_source_reachable_via_rx, + ip4_source_check_reachable_via_rx) + static clib_error_t * set_ip_source_check (vlib_main_t * vm, unformat_input_t * input, diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c index a136da3e..7093c571 100644 --- a/vnet/vnet/ip/ip6_forward.c +++ b/vnet/vnet/ip/ip6_forward.c @@ -1260,6 +1260,8 @@ VLIB_REGISTER_NODE (ip6_lookup_node) = { .next_nodes = IP6_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_lookup_node, ip6_lookup) + static uword ip6_indirect (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -1278,6 +1280,8 @@ VLIB_REGISTER_NODE (ip6_indirect_node) = { .next_nodes = IP6_LOOKUP_NEXT_NODES, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_indirect_node, ip6_indirect) + typedef struct { /* Adjacency taken. */ u32 adj_index; @@ -1454,6 +1458,8 @@ VLIB_REGISTER_NODE (ip6_drop_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_drop_node, ip6_drop) + VLIB_REGISTER_NODE (ip6_punt_node,static) = { .function = ip6_punt, .name = "ip6-punt", @@ -1467,6 +1473,8 @@ VLIB_REGISTER_NODE (ip6_punt_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_punt_node, ip6_punt) + VLIB_REGISTER_NODE (ip6_miss_node,static) = { .function = ip6_miss, .name = "ip6-miss", @@ -1480,6 +1488,8 @@ VLIB_REGISTER_NODE (ip6_miss_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_miss_node, ip6_miss) + VLIB_REGISTER_NODE (ip6_multicast_node,static) = { .function = ip6_drop, .name = "ip6-multicast", @@ -1858,6 +1868,8 @@ VLIB_REGISTER_NODE (ip6_local_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_local_node, ip6_local) + void ip6_register_protocol (u32 protocol, u32 node_index) { vlib_main_t * vm = vlib_get_main(); @@ -2424,6 +2436,8 @@ VLIB_REGISTER_NODE (ip6_rewrite_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_rewrite_node, ip6_rewrite_transit) + VLIB_REGISTER_NODE (ip6_rewrite_local_node,static) = { .function = ip6_rewrite_local, .name = "ip6-rewrite-local", @@ -2439,6 +2453,8 @@ VLIB_REGISTER_NODE (ip6_rewrite_local_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_rewrite_local_node, ip6_rewrite_local) + /* Global IP6 main. 
*/ ip6_main_t ip6_main; diff --git a/vnet/vnet/ip/ip6_hop_by_hop.c b/vnet/vnet/ip/ip6_hop_by_hop.c index 16650dde..045dbc7f 100644 --- a/vnet/vnet/ip/ip6_hop_by_hop.c +++ b/vnet/vnet/ip/ip6_hop_by_hop.c @@ -628,6 +628,8 @@ VLIB_REGISTER_NODE (ip6_hop_by_hop_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_hop_by_hop_node, ip6_hop_by_hop_node_fn) + /* The main h-b-h tracer will be invoked, no need to do much here */ typedef struct { u32 next_index; @@ -849,6 +851,7 @@ VLIB_REGISTER_NODE (ip6_add_hop_by_hop_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_add_hop_by_hop_node, ip6_add_hop_by_hop_node_fn) /* The main h-b-h tracer was already invoked, no need to do much here */ typedef struct { @@ -1162,6 +1165,8 @@ VLIB_REGISTER_NODE (ip6_pop_hop_by_hop_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_pop_hop_by_hop_node, + ip6_pop_hop_by_hop_node_fn) static clib_error_t * ip6_hop_by_hop_init (vlib_main_t * vm) diff --git a/vnet/vnet/ip/ip6_input.c b/vnet/vnet/ip/ip6_input.c index f96a1cfb..2042cbd7 100644 --- a/vnet/vnet/ip/ip6_input.c +++ b/vnet/vnet/ip/ip6_input.c @@ -305,6 +305,8 @@ VLIB_REGISTER_NODE (ip6_input_node) = { .format_trace = format_ip6_input_trace, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_input_node, ip6_input) + static clib_error_t * ip6_init (vlib_main_t * vm) { ethernet_register_input_type (vm, ETHERNET_TYPE_IP6, diff --git a/vnet/vnet/ip/ip_input_acl.c b/vnet/vnet/ip/ip_input_acl.c index e905ed15..fcf8eeab 100644 --- a/vnet/vnet/ip/ip_input_acl.c +++ b/vnet/vnet/ip/ip_input_acl.c @@ -369,6 +369,8 @@ VLIB_REGISTER_NODE (ip4_inacl_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip4_inacl_node, ip4_inacl) + static uword ip6_inacl (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -392,6 +394,8 @@ VLIB_REGISTER_NODE (ip6_inacl_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ip6_inacl_node, ip6_inacl) + static clib_error_t * ip_inacl_init (vlib_main_t * vm) { diff --git a/vnet/vnet/ip/udp_local.c b/vnet/vnet/ip/udp_local.c index 253ad0f3..354dd4e8 100644 --- a/vnet/vnet/ip/udp_local.c +++ b/vnet/vnet/ip/udp_local.c @@ -327,6 +327,8 @@ VLIB_REGISTER_NODE (udp4_input_node) = { .unformat_buffer = unformat_udp_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (udp4_input_node, udp4_input) + VLIB_REGISTER_NODE (udp6_input_node) = { .function = udp6_input, .name = "ip6-udp-lookup", @@ -350,6 +352,8 @@ VLIB_REGISTER_NODE (udp6_input_node) = { .unformat_buffer = unformat_udp_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (udp6_input_node, udp6_input) + static void add_dst_port (udp_main_t * um, udp_dst_port_t dst_port, char * dst_port_name, u8 is_ip4) diff --git a/vnet/vnet/ipsec/esp_decrypt.c b/vnet/vnet/ipsec/esp_decrypt.c index 958a4d67..7af88b23 100644 --- a/vnet/vnet/ipsec/esp_decrypt.c +++ b/vnet/vnet/ipsec/esp_decrypt.c @@ -425,3 +425,5 @@ VLIB_REGISTER_NODE (esp_decrypt_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (esp_decrypt_node, esp_decrypt_node_fn) + diff --git a/vnet/vnet/ipsec/esp_encrypt.c b/vnet/vnet/ipsec/esp_encrypt.c index 39bbf2e4..d1dbcf96 100644 --- a/vnet/vnet/ipsec/esp_encrypt.c +++ b/vnet/vnet/ipsec/esp_encrypt.c @@ -387,3 +387,5 @@ VLIB_REGISTER_NODE (esp_encrypt_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn) + diff --git a/vnet/vnet/ipsec/ipsec_if_in.c b/vnet/vnet/ipsec/ipsec_if_in.c index 517f8bff..5a8a6858 100644 --- a/vnet/vnet/ipsec/ipsec_if_in.c +++ b/vnet/vnet/ipsec/ipsec_if_in.c @@ -148,4 +148,7 @@ VLIB_REGISTER_NODE (ipsec_if_input_node) = { [IPSEC_IF_INPUT_NEXT_ESP_DECRYPT] = "esp-decrypt", [IPSEC_IF_INPUT_NEXT_DROP] = 
"error-drop", }, -}; \ No newline at end of file +}; + +VLIB_NODE_FUNCTION_MULTIARCH (ipsec_if_input_node, ipsec_if_input_node_fn) + diff --git a/vnet/vnet/ipsec/ipsec_if_out.c b/vnet/vnet/ipsec/ipsec_if_out.c index 1e1dd528..9573c4f6 100644 --- a/vnet/vnet/ipsec/ipsec_if_out.c +++ b/vnet/vnet/ipsec/ipsec_if_out.c @@ -138,3 +138,5 @@ VLIB_REGISTER_NODE (ipsec_if_output_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ipsec_if_output_node, ipsec_if_output_node_fn) + diff --git a/vnet/vnet/ipsec/ipsec_input.c b/vnet/vnet/ipsec/ipsec_input.c index 3cd60ba1..09acd106 100644 --- a/vnet/vnet/ipsec/ipsec_input.c +++ b/vnet/vnet/ipsec/ipsec_input.c @@ -288,6 +288,9 @@ VLIB_REGISTER_NODE (ipsec_input_ip4_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip4_node, + ipsec_input_ip4_node_fn) + static vlib_node_registration_t ipsec_input_ip6_node; @@ -406,3 +409,7 @@ VLIB_REGISTER_NODE (ipsec_input_ip6_node,static) = { #undef _ }, }; + +VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip6_node, + ipsec_input_ip6_node_fn) + diff --git a/vnet/vnet/ipsec/ipsec_output.c b/vnet/vnet/ipsec/ipsec_output.c index 509b1e2f..93554681 100644 --- a/vnet/vnet/ipsec/ipsec_output.c +++ b/vnet/vnet/ipsec/ipsec_output.c @@ -405,6 +405,8 @@ VLIB_REGISTER_NODE (ipsec_output_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (ipsec_output_node, ipsec_output_node_fn) + #else /* IPSEC > 1 */ /* Dummy ipsec output node, in case when IPSec is disabled */ diff --git a/vnet/vnet/l2/l2_classify.c b/vnet/vnet/l2/l2_classify.c index 449dea72..765bc686 100644 --- a/vnet/vnet/l2/l2_classify.c +++ b/vnet/vnet/l2/l2_classify.c @@ -399,6 +399,8 @@ VLIB_REGISTER_NODE (l2_classify_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_classify_node, l2_classify_node_fn) + clib_error_t *l2_classify_init (vlib_main_t *vm) { l2_classify_main_t * cm = &l2_classify_main; diff --git a/vnet/vnet/l2/l2_efp_filter.c b/vnet/vnet/l2/l2_efp_filter.c index b865c375..17b7eb3f 100644 --- a/vnet/vnet/l2/l2_efp_filter.c +++ b/vnet/vnet/l2/l2_efp_filter.c @@ -498,6 +498,8 @@ VLIB_REGISTER_NODE (l2_efp_filter_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_efp_filter_node, l2_efp_filter_node_fn) + clib_error_t *l2_efp_filter_init (vlib_main_t *vm) { l2_efp_filter_main_t * mp = &l2_efp_filter_main; diff --git a/vnet/vnet/l2/l2_flood.c b/vnet/vnet/l2/l2_flood.c index ca8c171c..9f71677c 100644 --- a/vnet/vnet/l2/l2_flood.c +++ b/vnet/vnet/l2/l2_flood.c @@ -442,6 +442,8 @@ VLIB_REGISTER_NODE (l2flood_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2flood_node, l2flood_node_fn) + clib_error_t *l2flood_init (vlib_main_t *vm) { l2flood_main_t * mp = &l2flood_main; diff --git a/vnet/vnet/l2/l2_fwd.c b/vnet/vnet/l2/l2_fwd.c index 88a6b69c..5af83a75 100644 --- a/vnet/vnet/l2/l2_fwd.c +++ b/vnet/vnet/l2/l2_fwd.c @@ -367,6 +367,8 @@ VLIB_REGISTER_NODE (l2fwd_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2fwd_node, l2fwd_node_fn) + clib_error_t *l2fwd_init (vlib_main_t *vm) { l2fwd_main_t * mp = &l2fwd_main; diff --git a/vnet/vnet/l2/l2_input.c b/vnet/vnet/l2/l2_input.c index 870aff68..dd3a8b75 100644 --- a/vnet/vnet/l2/l2_input.c +++ b/vnet/vnet/l2/l2_input.c @@ -433,6 +433,8 @@ VLIB_REGISTER_NODE (l2input_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2input_node, l2input_node_fn) + clib_error_t *l2input_init (vlib_main_t *vm) { l2input_main_t * mp = &l2input_main; diff --git a/vnet/vnet/l2/l2_input_acl.c b/vnet/vnet/l2/l2_input_acl.c index 49df0f65..68c005f1 100644 --- a/vnet/vnet/l2/l2_input_acl.c +++ b/vnet/vnet/l2/l2_input_acl.c @@ 
-371,6 +371,8 @@ VLIB_REGISTER_NODE (l2_inacl_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_inacl_node, l2_inacl_node_fn) + clib_error_t *l2_inacl_init (vlib_main_t *vm) { l2_inacl_main_t * mp = &l2_inacl_main; diff --git a/vnet/vnet/l2/l2_input_vtr.c b/vnet/vnet/l2/l2_input_vtr.c index bbef33a9..d0f2181b 100644 --- a/vnet/vnet/l2/l2_input_vtr.c +++ b/vnet/vnet/l2/l2_input_vtr.c @@ -293,6 +293,8 @@ VLIB_REGISTER_NODE (l2_invtr_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_invtr_node, l2_invtr_node_fn) + clib_error_t *l2_invtr_init (vlib_main_t *vm) { l2_invtr_main_t * mp = &l2_invtr_main; diff --git a/vnet/vnet/l2/l2_learn.c b/vnet/vnet/l2/l2_learn.c index a891cb3c..0d94e43d 100644 --- a/vnet/vnet/l2/l2_learn.c +++ b/vnet/vnet/l2/l2_learn.c @@ -415,6 +415,7 @@ VLIB_REGISTER_NODE (l2learn_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2learn_node, l2learn_node_fn) clib_error_t *l2learn_init (vlib_main_t *vm) { diff --git a/vnet/vnet/l2/l2_output.c b/vnet/vnet/l2/l2_output.c index a550d075..f5b22117 100644 --- a/vnet/vnet/l2/l2_output.c +++ b/vnet/vnet/l2/l2_output.c @@ -414,6 +414,8 @@ VLIB_REGISTER_NODE (l2output_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2output_node, l2output_node_fn) + clib_error_t *l2output_init (vlib_main_t *vm) { l2output_main_t * mp = &l2output_main; diff --git a/vnet/vnet/l2/l2_output_acl.c b/vnet/vnet/l2/l2_output_acl.c index f75345f1..b222255f 100644 --- a/vnet/vnet/l2/l2_output_acl.c +++ b/vnet/vnet/l2/l2_output_acl.c @@ -274,6 +274,8 @@ VLIB_REGISTER_NODE (l2_outacl_node,static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_outacl_node, l2_outacl_node_fn) + clib_error_t *l2_outacl_init (vlib_main_t *vm) { l2_outacl_main_t * mp = &l2_outacl_main; diff --git a/vnet/vnet/l2/l2_patch.c b/vnet/vnet/l2/l2_patch.c index 63be409d..0839142b 100644 --- a/vnet/vnet/l2/l2_patch.c +++ b/vnet/vnet/l2/l2_patch.c @@ -252,6 +252,8 @@ VLIB_REGISTER_NODE (l2_patch_node, static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_patch_node, l2_patch_node_fn) + int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, int is_add) { l2_patch_main_t * l2pm = &l2_patch_main; diff --git a/vnet/vnet/l2/l2_rw.c b/vnet/vnet/l2/l2_rw.c index 93580c24..b66a7d34 100644 --- a/vnet/vnet/l2/l2_rw.c +++ b/vnet/vnet/l2/l2_rw.c @@ -604,3 +604,5 @@ VLIB_REGISTER_NODE (l2_rw_node) = { .next_nodes = { [L2_RW_NEXT_DROP] = "error-drop"}, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_rw_node, l2_rw_node_fn) + diff --git a/vnet/vnet/l2/l2_xcrw.c b/vnet/vnet/l2/l2_xcrw.c index f5fe3ca1..00542a19 100644 --- a/vnet/vnet/l2/l2_xcrw.c +++ b/vnet/vnet/l2/l2_xcrw.c @@ -255,6 +255,8 @@ VLIB_REGISTER_NODE (l2_xcrw_node, static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2_xcrw_node, l2_xcrw_node_fn) + clib_error_t *l2_xcrw_init (vlib_main_t *vm) { l2_xcrw_main_t * mp = &l2_xcrw_main; diff --git a/vnet/vnet/l2tp/decap.c b/vnet/vnet/l2tp/decap.c index 1a2bc489..5f0d05c0 100644 --- a/vnet/vnet/l2tp/decap.c +++ b/vnet/vnet/l2tp/decap.c @@ -247,6 +247,8 @@ VLIB_REGISTER_NODE (l2t_decap_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2t_decap_node, l2t_decap_node_fn) + void l2tp_decap_init (void) { ip6_register_protocol (IP_PROTOCOL_L2TP, l2t_decap_node.index); diff --git a/vnet/vnet/l2tp/encap.c b/vnet/vnet/l2tp/encap.c index 8f26ab00..eca098cb 100644 --- a/vnet/vnet/l2tp/encap.c +++ b/vnet/vnet/l2tp/encap.c @@ -206,6 +206,8 @@ VLIB_REGISTER_NODE (l2t_encap_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2t_encap_node, l2t_encap_node_fn) + void l2tp_encap_init (vlib_main_t * vm) { 
l2tp_encap_runtime_t * rt; diff --git a/vnet/vnet/lawful-intercept/node.c b/vnet/vnet/lawful-intercept/node.c index 26514cc5..8701c323 100644 --- a/vnet/vnet/lawful-intercept/node.c +++ b/vnet/vnet/lawful-intercept/node.c @@ -272,6 +272,8 @@ VLIB_REGISTER_NODE (li_hit_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (li_hit_node, li_hit_node_fn) + #else #include diff --git a/vnet/vnet/mpls-gre/interface.c b/vnet/vnet/mpls-gre/interface.c index 408ca750..75cd022a 100644 --- a/vnet/vnet/mpls-gre/interface.c +++ b/vnet/vnet/mpls-gre/interface.c @@ -259,6 +259,9 @@ VNET_DEVICE_CLASS (mpls_gre_device_class) = { #endif }; +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (mpls_gre_device_class, + mpls_gre_interface_tx) + VNET_HW_INTERFACE_CLASS (mpls_gre_hw_interface_class) = { .name = "MPLS-GRE", .format_header = format_mpls_gre_header_with_length, @@ -510,6 +513,8 @@ VNET_DEVICE_CLASS (mpls_eth_device_class) = { #endif }; +VLIB_DEVICE_TX_FUNCTION_MULTIARCH (mpls_eth_device_class, + mpls_eth_interface_tx) VNET_HW_INTERFACE_CLASS (mpls_eth_hw_interface_class) = { .name = "MPLS-ETH", @@ -676,6 +681,8 @@ VLIB_REGISTER_NODE (mpls_post_rewrite_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (mpls_post_rewrite_node, mpls_post_rewrite) + static u8 * mpls_gre_rewrite (mpls_main_t *mm, mpls_gre_tunnel_t * t) { ip4_header_t * ip0; diff --git a/vnet/vnet/mpls-gre/node.c b/vnet/vnet/mpls-gre/node.c index 6bf5f814..474e2e2a 100644 --- a/vnet/vnet/mpls-gre/node.c +++ b/vnet/vnet/mpls-gre/node.c @@ -278,6 +278,8 @@ VLIB_REGISTER_NODE (mpls_input_node) = { .unformat_buffer = unformat_mpls_gre_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (mpls_input_node, mpls_input) + static uword mpls_ethernet_input (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -310,6 +312,8 @@ VLIB_REGISTER_NODE (mpls_ethernet_input_node) = { .unformat_buffer = unformat_mpls_gre_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (mpls_ethernet_input_node, mpls_ethernet_input) + static void mpls_setup_nodes (vlib_main_t * vm) { diff --git a/vnet/vnet/mpls-gre/policy_encap.c b/vnet/vnet/mpls-gre/policy_encap.c index b4439193..e3f4beb6 100644 --- a/vnet/vnet/mpls-gre/policy_encap.c +++ b/vnet/vnet/mpls-gre/policy_encap.c @@ -153,6 +153,8 @@ VLIB_REGISTER_NODE (mpls_policy_encap_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (mpls_policy_encap_node, mpls_policy_encap) + static clib_error_t * mpls_policy_encap_init (vlib_main_t * vm) { diff --git a/vnet/vnet/policer/node_funcs.c b/vnet/vnet/policer/node_funcs.c index 8a76e869..739ce455 100644 --- a/vnet/vnet/policer/node_funcs.c +++ b/vnet/vnet/policer/node_funcs.c @@ -324,6 +324,9 @@ VLIB_REGISTER_NODE (policer_by_sw_if_index_node, static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (policer_by_sw_if_index_node, + vnet_policer_by_sw_if_index); + int test_policer_add_del (u32 rx_sw_if_index, u8 *config_name, int is_add) diff --git a/vnet/vnet/sr/sr.c b/vnet/vnet/sr/sr.c index 46318a27..e0ef3181 100644 --- a/vnet/vnet/sr/sr.c +++ b/vnet/vnet/sr/sr.c @@ -653,6 +653,8 @@ VLIB_REGISTER_NODE (sr_rewrite_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (sr_rewrite_node, sr_rewrite) + static int ip6_delete_route_no_next_hop (ip6_address_t *dst_address_arg, u32 dst_address_length, u32 rx_table_id) @@ -1911,6 +1913,8 @@ VLIB_REGISTER_NODE (sr_fix_dst_addr_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (sr_fix_dst_addr_node, sr_fix_dst_addr) + static clib_error_t * sr_init (vlib_main_t * vm) { ip6_sr_main_t * sm = &sr_main; @@ -2560,6 +2564,8 @@ VLIB_REGISTER_NODE (sr_local_node, static) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (sr_local_node, 
sr_local) + ip6_sr_main_t * sr_get_main (vlib_main_t * vm) { vlib_call_init_function (vm, sr_init); diff --git a/vnet/vnet/sr/sr_replicate.c b/vnet/vnet/sr/sr_replicate.c index 80d04ba6..ccc01465 100644 --- a/vnet/vnet/sr/sr_replicate.c +++ b/vnet/vnet/sr/sr_replicate.c @@ -350,6 +350,8 @@ VLIB_REGISTER_NODE (sr_replicate_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (sr_replicate_node, sr_replicate_node_fn) + clib_error_t *sr_replicate_init (vlib_main_t *vm) { sr_replicate_main_t *msm = &sr_replicate_main; diff --git a/vnet/vnet/vxlan/decap.c b/vnet/vnet/vxlan/decap.c index bd61f4bb..00acd660 100644 --- a/vnet/vnet/vxlan/decap.c +++ b/vnet/vnet/vxlan/decap.c @@ -550,6 +550,8 @@ VLIB_REGISTER_NODE (vxlan4_input_node) = { // $$$$ .unformat_buffer = unformat_vxlan_header, }; +VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input) + VLIB_REGISTER_NODE (vxlan6_input_node) = { .function = vxlan6_input, .name = "vxlan6-input", @@ -570,3 +572,6 @@ VLIB_REGISTER_NODE (vxlan6_input_node) = { .format_trace = format_vxlan_rx_trace, // $$$$ .unformat_buffer = unformat_vxlan_header, }; + +VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input) + diff --git a/vnet/vnet/vxlan/encap.c b/vnet/vnet/vxlan/encap.c index ee5c7d5e..387a728a 100644 --- a/vnet/vnet/vxlan/encap.c +++ b/vnet/vnet/vxlan/encap.c @@ -586,3 +586,6 @@ VLIB_REGISTER_NODE (vxlan_encap_node) = { [VXLAN_ENCAP_NEXT_DROP] = "error-drop", }, }; + +VLIB_NODE_FUNCTION_MULTIARCH (vxlan_encap_node, vxlan_encap) + diff --git a/vpp/app/l2t_ip6.c b/vpp/app/l2t_ip6.c index ac24886a..8a1e23a9 100644 --- a/vpp/app/l2t_ip6.c +++ b/vpp/app/l2t_ip6.c @@ -274,6 +274,8 @@ static VLIB_REGISTER_NODE (sw6_ip6_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (sw6_ip6_node, ip6_l2t_node_fn) + static clib_error_t * l2tp_config (vlib_main_t * vm, unformat_input_t * input) { diff --git a/vpp/app/l2t_l2.c b/vpp/app/l2t_l2.c index 5c8327d5..f6385a5a 100644 --- a/vpp/app/l2t_l2.c +++ b/vpp/app/l2t_l2.c @@ -250,3 +250,5 @@ VLIB_REGISTER_NODE (l2t_l2_node) = { }, }; +VLIB_NODE_FUNCTION_MULTIARCH (l2t_l2_node, l2t_l2_node_fn) + diff --git a/vpp/app/version.c b/vpp/app/version.c index 588aadf2..22bec400 100644 --- a/vpp/app/version.c +++ b/vpp/app/version.c @@ -59,6 +59,7 @@ show_vpe_version_command_fn (vlib_main_t * vm, _("Compiler", "%s", vpe_compiler); _("CPU model name", "%U", format_cpu_model_name); _("CPU microarchitecture", "%U", format_cpu_uarch); + _("CPU flags", "%U", format_cpu_flags); _("Current PID", "%d", getpid()); #if DPDK > 0 _("DPDK Version", "%s", rte_version()); diff --git a/vpp/vnet/main.c b/vpp/vnet/main.c index f707994b..25400839 100644 --- a/vpp/vnet/main.c +++ b/vpp/vnet/main.c @@ -12,6 +12,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + +#include #include #include #include @@ -59,33 +61,32 @@ int main (int argc, char * argv[]) u32 size; void vlib_set_get_handoff_structure_cb (void *cb); -#if __x86_64__ && !defined(__clang__) - __builtin_cpu_init (); +#if __x86_64__ const char * msg = "ERROR: This binary requires CPU with %s extensions.\n"; #define _(a,b) \ - if (!__builtin_cpu_supports(a)) \ + if (!clib_cpu_supports_ ## a ()) \ { \ fprintf(stderr, msg, b); \ exit(1); \ } #if __AVX2__ - _("avx2", "AVX2") + _(avx2, "AVX2") #endif #if __AVX__ - _("avx", "AVX") + _(avx, "AVX") #endif #if __SSE4_2__ - _("sse4.2", "SSE4.2") + _(sse42, "SSE4.2") #endif #if __SSE4_1__ - _("sse4.1", "SSE4.1") + _(sse41, "SSE4.1") #endif #if __SSSE3__ - _("ssse3", "SSSE3") + _(ssse3, "SSSE3") #endif #if __SSE3__ - _("sse3", "SSE3") + _(sse3, "SSE3") #endif #undef _ #endif diff --git a/vppinfra/vppinfra/cpu.c b/vppinfra/vppinfra/cpu.c index f2dbaf1f..9008ee3d 100644 --- a/vppinfra/vppinfra/cpu.c +++ b/vppinfra/vppinfra/cpu.c @@ -16,10 +16,6 @@ #include #include -#if __x86_64__ -#include -#endif - #define foreach_x86_cpu_uarch \ _(0x06, 0x4f, "Broadwell", "Broadwell-EP/EX") \ _(0x06, 0x3d, "Broadwell", "Broadwell") \ @@ -108,4 +104,21 @@ format_cpu_model_name (u8 * s, va_list * args) #else /* ! __x86_64__ */ return format (s, "unknown"); #endif -} \ No newline at end of file +} + +u8 * +format_cpu_flags (u8 * s, va_list * args) +{ +#if __x86_64__ +#define _(flag, func, reg, bit) \ + if (clib_cpu_supports_ ## flag()) \ + s = format (s, #flag " "); + foreach_x86_64_flags + return s; +#undef _ +#else /* ! __x86_64__ */ + return format (s, "unknown"); +#endif +} + + diff --git a/vppinfra/vppinfra/cpu.h b/vppinfra/vppinfra/cpu.h index 79cdf74f..961af709 100644 --- a/vppinfra/vppinfra/cpu.h +++ b/vppinfra/vppinfra/cpu.h @@ -16,7 +16,83 @@ #ifndef included_clib_cpu_h #define included_clib_cpu_h +#include + +/* + * multiarchitecture support. Adding new entry will produce + * new graph node function variant optimized for specific cpu + * microarchitecture. + * Order is important for runtime selection, as 1st match wins... + */ + +#if __x86_64__ && CLIB_DEBUG == 0 +#define foreach_march_variant(macro, x) \ + macro(avx2, x, "arch=core-avx2") +#else +#define foreach_march_variant(macro, x) +#endif + + +#if __GNUC__ > 4 && !__clang__ +#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("tree-vectorize"))) +#else +#define CLIB_CPU_OPTIMIZED +#endif + + +#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \ + if (clib_cpu_supports_ ## arch()) \ + return & fn ## _ ##arch; + +#define CLIB_MULTIARCH_SELECT_FN(fn,...) 
\ + __VA_ARGS__ void * fn ## _multiarch_select(void) \ +{ \ + foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \ + return & fn; \ +} + +#if __x86_64__ +#include "cpuid.h" + +#define foreach_x86_64_flags \ +_ (sse3, 1, ecx, 0) \ +_ (ssse3, 1, ecx, 9) \ +_ (sse41, 1, ecx, 19) \ +_ (sse42, 1, ecx, 20) \ +_ (avx, 1, ecx, 28) \ +_ (avx2, 7, ebx, 5) \ +_ (avx512f, 7, ebx, 16) \ +_ (aes, 1, ecx, 25) \ +_ (sha, 7, ebx, 29) + +static inline int +clib_get_cpuid(const u32 lev, u32 * eax, u32 *ebx, u32 * ecx, u32 * edx) +{ + if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev) + return 0; + if (lev == 7) + __cpuid_count(lev, 0, *eax, *ebx, *ecx, *edx); + else + __cpuid(lev, *eax, *ebx, *ecx, *edx); + return 1; +} + + +#define _(flag, func, reg, bit) \ +static inline int \ +clib_cpu_supports_ ## flag() \ +{ \ + u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \ + clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \ + \ + return ((reg & (1 << bit)) != 0); \ +} + foreach_x86_64_flags +#undef _ +#endif + format_function_t format_cpu_uarch; format_function_t format_cpu_model_name; +format_function_t format_cpu_flags; -#endif \ No newline at end of file +#endif -- cgit 1.2.3-korg From 96e10b554f4561d343cb1eb588dfa9789e74a6f0 Mon Sep 17 00:00:00 2001 From: Thomas Monjalon Date: Thu, 2 Jun 2016 14:52:32 +0200 Subject: dpdk: download from dpdk.org The tarball repository fast.dpdk.org is fast and reliable enough to be used in VPP build process. Change-Id: Ifaae57d6f8308127b93fc51b2a2a863da5766cd2 Signed-off-by: Thomas Monjalon --- dpdk/Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index b7e1e097..51c7ee5b 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -25,12 +25,12 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.04 -DPDK_BASE_URL ?= https://nexus.fd.io/content/repositories/thirdparty/ -DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.gz +DPDK_BASE_URL ?= http://fast.dpdk.org/rel +DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_2.1.0_TARBALL_MD5_CKSUM := 205a0d12bfd6eb717d57506272f43519 -DPDK_2.2.0_TARBALL_MD5_CKSUM := 22e2fd68cd5504f43fe9a5a6fd6dd938 -DPDK_16.04_TARBALL_MD5_CKSUM := 0728d506d7f56eb64233e824fa3c098a +DPDK_2.1.0_TARBALL_MD5_CKSUM := a0d69f78f360e03dbdf38ae93fad32d4 +DPDK_2.2.0_TARBALL_MD5_CKSUM := bfa10283b49d56264c82519d130458cc +DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifneq (,$(findstring clang,$(CC))) @@ -151,7 +151,7 @@ download: $(B)/.download.ok $(B)/.extract.ok: $(B)/.download.ok @echo --- extracting $(DPDK_TARBALL) --- - @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) --gzip + @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) @touch $@ .PHONY: extract -- cgit 1.2.3-korg From a15568029aecb3c1da582c8662702edd502bee72 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Sun, 26 Jun 2016 20:12:40 +0200 Subject: Enable PCI extended tags in the DPDK config This change should improve DPDK performance on 40G+ NICs with small packets. It also removes ITR setting for i40e devices, allowing driver to set default. 
Change-Id: I70761b155e48fb0281f7c231516d83027bd16ca2 Signed-off-by: Damjan Marion --- dpdk/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 51c7ee5b..6296c965 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -108,7 +108,9 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBEAL_USE_HPET,y) $(call set,RTE_BUILD_COMBINE_LIBS,y) $(call set,RTE_LIBRTE_I40E_16BYTE_RX_DESC,y) - $(call set,RTE_LIBRTE_I40E_ITR_INTERVAL,16) + $(call set,RTE_PCI_CONFIG,y) + $(call set,RTE_PCI_EXTENDED_TAG,"on") + $(call set,RTE_PCI_MAX_READ_REQUEST_SIZE,4096) @# enable debug init for device drivers $(call set,RTE_LIBRTE_I40E_DEBUG_INIT,$(DPDK_DEBUG)) $(call set,RTE_LIBRTE_IXGBE_DEBUG_INIT,$(DPDK_DEBUG)) -- cgit 1.2.3-korg From 1f0da170e818a6991d841df47de0885da720b0c3 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 13 Jul 2016 22:44:18 +0200 Subject: Add DPDK 16.07 support (rc3 based) DPDK vhost-user support is disabled due to significan changes in the DPDK vhost-user code which are not compatible with current VPP code. Change-Id: I3f0d28cb75f6370282ec7e33d57cbfb77e1a3ce1 Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- ...Add-packet_type-metadata-in-the-i40e-vPMD.patch | 1210 ++++++++++++++++++++ ...0e-Enable-bad-checksum-flags-in-i40e-vPMD.patch | 111 ++ vlib/vlib/dpdk_buffer.c | 5 + vnet/vnet/devices/dpdk/cli.c | 5 + vnet/vnet/devices/dpdk/device.c | 8 + vnet/vnet/devices/dpdk/dpdk.h | 19 + vnet/vnet/devices/dpdk/dpdk_priv.h | 11 + vnet/vnet/devices/dpdk/format.c | 34 +- vnet/vnet/devices/dpdk/init.c | 8 + vnet/vnet/devices/dpdk/vhost_user.c | 3 + vpp/vpp-api/api.c | 23 +- 12 files changed, 1420 insertions(+), 19 deletions(-) create mode 100644 dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch create mode 100644 dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 6296c965..6a38ef10 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -28,9 +28,9 @@ DPDK_VERSION ?= 16.04 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_2.1.0_TARBALL_MD5_CKSUM := a0d69f78f360e03dbdf38ae93fad32d4 DPDK_2.2.0_TARBALL_MD5_CKSUM := bfa10283b49d56264c82519d130458cc DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306 +DPDK_16.07-rc3_TARBALL_MD5_CKSUM := fd59b0c2ab13c6cc7b7c22c2108b33c9 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifneq (,$(findstring clang,$(CC))) diff --git a/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch new file mode 100644 index 00000000..74d9416e --- /dev/null +++ b/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch @@ -0,0 +1,1210 @@ +From 79a2ddaf4d7df7172faa54716ae7647ad7a549b9 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Thu, 14 Jul 2016 09:59:01 -0700 +Subject: [PATCH 1/2] i40e: Add packet_type metadata in the i40e vPMD + +The ptype is decoded from the rx descriptor and stored +in the packet type field in the mbuf using the same function +as the non-vector driver. 
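Concretely, per received packet this amounts to pulling the 8-bit PTYPE field out of the RX descriptor and indexing the 256-entry table that the patch moves into i40e_rxtx.h, as the scalar sketch below shows. The descriptor field and shift/mask names are the usual i40e ones and are assumed here rather than taken from this excerpt, the helper name is invented for illustration, and the in-tree vPMD extracts the field with SSE shuffles rather than per-packet loads.

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include "base/i40e_type.h"   /* union i40e_rx_desc, PTYPE shift/mask */
#include "i40e_rxtx.h"        /* i40e_rxd_pkt_type_mapping() */

/* scalar illustration of the ptype handling added to the vector RX path */
static inline void
i40e_vec_set_ptype_sketch (struct rte_mbuf *mb,
                           volatile union i40e_rx_desc *rxdp)
{
  uint64_t qword1 = rte_le_to_cpu_64 (rxdp->wb.qword1.status_error_len);
  uint8_t ptype = (qword1 & I40E_RXD_QW1_PTYPE_MASK)
                  >> I40E_RXD_QW1_PTYPE_SHIFT;

  /* same 256-entry lookup table the non-vector driver uses */
  mb->packet_type = i40e_rxd_pkt_type_mapping (ptype);
}

This is also why the patch extends i40e_dev_supported_ptypes_get() later on, so that ptype support is reported when i40e_recv_pkts_vec is the active burst function.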
+ +Signed-off-by: Damjan Marion +Signed-off-by: Jeff Shaw +--- + drivers/net/i40e/i40e_rxtx.c | 566 +-------------------------------------- + drivers/net/i40e/i40e_rxtx.h | 563 ++++++++++++++++++++++++++++++++++++++ + drivers/net/i40e/i40e_rxtx_vec.c | 16 ++ + 3 files changed, 581 insertions(+), 564 deletions(-) + +diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c +index d3cfb98..2903347 100644 +--- a/drivers/net/i40e/i40e_rxtx.c ++++ b/drivers/net/i40e/i40e_rxtx.c +@@ -174,569 +174,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) + } + #endif + +-/* For each value it means, datasheet of hardware can tell more details +- * +- * @note: fix i40e_dev_supported_ptypes_get() if any change here. +- */ +-static inline uint32_t +-i40e_rxd_pkt_type_mapping(uint8_t ptype) +-{ +- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { +- /* L2 types */ +- /* [0] reserved */ +- [1] = RTE_PTYPE_L2_ETHER, +- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, +- /* [3] - [5] reserved */ +- [6] = RTE_PTYPE_L2_ETHER_LLDP, +- /* [7] - [10] reserved */ +- [11] = RTE_PTYPE_L2_ETHER_ARP, +- /* [12] - [21] reserved */ +- +- /* Non tunneled IPv4 */ +- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- /* [25] reserved */ +- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* IPv4 --> IPv4 */ +- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [32] reserved */ +- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> IPv6 */ +- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [39] reserved */ +- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [42] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN */ +- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ +- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [47] reserved */ +- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ +- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [54] reserved */ +- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ +- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ +- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [62] reserved */ +- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [65] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ +- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [69] reserved */ +- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ +- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ +- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [77] reserved */ +- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ +- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [84] reserved */ +- [85] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* Non tunneled IPv6 */ +- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- /* [91] reserved */ +- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* IPv6 --> IPv4 */ +- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [98] reserved */ +- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> IPv6 */ +- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [105] reserved */ +- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN */ +- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ +- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [113] reserved */ +- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ +- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [120] reserved */ +- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ +- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ +- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [128] reserved */ +- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ +- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [135] reserved */ +- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ +- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ +- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [143] reserved */ +- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ +- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [150] reserved */ +- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- 
RTE_PTYPE_INNER_L4_ICMP, +- +- /* L2 NSH packet type */ +- [154] = RTE_PTYPE_L2_ETHER_NSH, +- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* All others reserved */ +- }; +- +- return type_table[ptype]; +-} +- + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 +@@ -2136,7 +1573,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) + #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || + #endif +- dev->rx_pkt_burst == i40e_recv_scattered_pkts) ++ dev->rx_pkt_burst == i40e_recv_scattered_pkts || ++ dev->rx_pkt_burst == i40e_recv_pkts_vec) + return ptypes; + return NULL; + } +diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h +index 98179f0..ecdb13c 100644 +--- a/drivers/net/i40e/i40e_rxtx.h ++++ b/drivers/net/i40e/i40e_rxtx.h +@@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq); + void i40e_set_tx_function(struct rte_eth_dev *dev); + ++/* For each value it means, datasheet of hardware can tell more details ++ * ++ * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
++ */ ++static inline uint32_t ++i40e_rxd_pkt_type_mapping(uint8_t ptype) ++{ ++ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { ++ /* L2 types */ ++ /* [0] reserved */ ++ [1] = RTE_PTYPE_L2_ETHER, ++ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, ++ /* [3] - [5] reserved */ ++ [6] = RTE_PTYPE_L2_ETHER_LLDP, ++ /* [7] - [10] reserved */ ++ [11] = RTE_PTYPE_L2_ETHER_ARP, ++ /* [12] - [21] reserved */ ++ ++ /* Non tunneled IPv4 */ ++ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ /* [25] reserved */ ++ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ ++ /* IPv4 --> IPv4 */ ++ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [32] reserved */ ++ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> IPv6 */ ++ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [39] reserved */ ++ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN */ ++ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ ++ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT 
| ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [47] reserved */ ++ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ ++ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [54] reserved */ ++ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ ++ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ ++ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [62] reserved */ ++ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ ++ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [69] reserved */ ++ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ ++ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ ++ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [77] reserved */ ++ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ ++ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [84] reserved */ ++ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* Non tunneled IPv6 */ ++ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ 
[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ /* [91] reserved */ ++ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ ++ /* IPv6 --> IPv4 */ ++ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [98] reserved */ ++ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> IPv6 */ ++ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [105] reserved */ ++ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN */ ++ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ ++ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [113] reserved */ ++ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> 
GRE/Teredo/VXLAN --> IPv6 */ ++ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [120] reserved */ ++ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ ++ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ ++ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [128] reserved */ ++ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ ++ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [135] reserved */ ++ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ ++ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ ++ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [143] reserved */ ++ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ ++ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [150] reserved */ ++ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* L2 NSH packet type */ ++ [154] = RTE_PTYPE_L2_ETHER_NSH, ++ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ 
RTE_PTYPE_L4_NONFRAG, ++ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ ++ /* All others reserved */ ++ }; ++ ++ return type_table[ptype]; ++} ++ + #endif /* _I40E_RXTX_H_ */ +diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c +index 05cb415..e78ac63 100644 +--- a/drivers/net/i40e/i40e_rxtx_vec.c ++++ b/drivers/net/i40e/i40e_rxtx_vec.c +@@ -187,6 +187,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) + + #define PKTLEN_SHIFT 10 + ++static inline void ++desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) ++{ ++ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); ++ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); ++ ++ ptype0 = _mm_srli_epi64(ptype0, 30); ++ ptype1 = _mm_srli_epi64(ptype1, 30); ++ ++ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0)); ++ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8)); ++ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0)); ++ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8)); ++} ++ + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +@@ -393,6 +408,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); ++ desc_to_ptype_v(descs, &rx_pkts[pos]); + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; +-- +2.7.4 + diff --git a/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch new file mode 100644 index 00000000..58256f19 --- /dev/null +++ b/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch @@ -0,0 +1,111 @@ +From 5917bd1cf9857979a7cae89f362d2c885f09d034 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Thu, 14 Jul 2016 09:59:02 -0700 +Subject: [PATCH 2/2] i40e: Enable bad checksum flags in i40e vPMD + +Decode the checksum flags from the rx descriptor, setting +the appropriate bit in the mbuf ol_flags field when the flag +indicates a bad checksum. + +Signed-off-by: Damjan Marion +Signed-off-by: Jeff Shaw +--- + drivers/net/i40e/i40e_rxtx_vec.c | 48 +++++++++++++++++++++++----------------- + 1 file changed, 28 insertions(+), 20 deletions(-) + +diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c +index e78ac63..ace51df 100644 +--- a/drivers/net/i40e/i40e_rxtx_vec.c ++++ b/drivers/net/i40e/i40e_rxtx_vec.c +@@ -138,19 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) + static inline void + desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) + { +- __m128i vlan0, vlan1, rss; +- union { +- uint16_t e[4]; +- uint64_t dword; +- } vol; ++ __m128i vlan0, vlan1, rss, l3_l4e; + + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. 
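/*
 * Illustration only, not part of the patch series: a minimal scalar sketch of
 * how the hardware ptype index is pulled out of an i40e RX descriptor and
 * translated through a lookup table like i40e_rxd_pkt_type_mapping() above.
 * The 8-bit ptype sits at bit 30 of descriptor qword1, which is why the
 * vectorized desc_to_ptype_v() added above shifts each half by 30 before
 * extracting bytes.  The table entries and EXAMPLE_* flag values here are
 * assumed stand-ins, not the real RTE_PTYPE_* constants.
 */
#include <stdint.h>

#define EXAMPLE_PTYPE_SHIFT 30
#define EXAMPLE_PTYPE_MASK  (0xFFULL << EXAMPLE_PTYPE_SHIFT)

/* assumed stand-ins for the RTE_PTYPE_* flags used by the real table */
#define EXAMPLE_L2_ETHER 0x1u
#define EXAMPLE_L3_IPV4  0x2u
#define EXAMPLE_L4_UDP   0x4u

static inline uint32_t
example_rxd_pkt_type(uint64_t qword1)
{
	static const uint32_t table[256] = {
		/* index 24 is "non tunneled IPv4, UDP" in the real mapping */
		[24] = EXAMPLE_L2_ETHER | EXAMPLE_L3_IPV4 | EXAMPLE_L4_UDP,
		/* unset entries stay 0, i.e. reserved/unknown */
	};
	uint8_t ptype = (uint8_t)((qword1 & EXAMPLE_PTYPE_MASK) >>
				  EXAMPLE_PTYPE_SHIFT);

	return table[ptype];
}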
+ */ +- const __m128i rss_vlan_msk = _mm_set_epi16( +- 0x0000, 0x0000, 0x0000, 0x0000, +- 0x3804, 0x3804, 0x3804, 0x3804); ++ const __m128i rss_vlan_msk = _mm_set_epi32( ++ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004); + + /* map rss and vlan type to rss hash and vlan flag */ + const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, +@@ -163,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + +- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); +- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); +- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); ++ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD, ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_L4_CKSUM_BAD, ++ PKT_RX_IP_CKSUM_BAD, ++ 0); ++ ++ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); ++ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); ++ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); + + vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); + vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); + +- rss = _mm_srli_epi16(vlan1, 11); ++ rss = _mm_srli_epi32(vlan1, 12); + rss = _mm_shuffle_epi8(rss_flags, rss); + ++ l3_l4e = _mm_srli_epi32(vlan1, 22); ++ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); ++ + vlan0 = _mm_or_si128(vlan0, rss); +- vol.dword = _mm_cvtsi128_si64(vlan0); ++ vlan0 = _mm_or_si128(vlan0, l3_l4e); + +- rx_pkts[0]->ol_flags = vol.e[0]; +- rx_pkts[1]->ol_flags = vol.e[1]; +- rx_pkts[2]->ol_flags = vol.e[2]; +- rx_pkts[3]->ol_flags = vol.e[3]; ++ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0); ++ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2); ++ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4); ++ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6); + } + #else + #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) +@@ -754,7 +762,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) + #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->hw_vlan_strip != 0 || +- rxmode->hw_vlan_extend != 0) ++ rxmode->hw_vlan_extend != 0 || ++ rxmode->hw_ip_checksum != 0) + return -1; + #endif + +@@ -765,8 +774,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) + /* - no csum error report support + * - no header split support + */ +- if (rxmode->hw_ip_checksum == 1 || +- rxmode->header_split == 1) ++ if (rxmode->header_split == 1) + return -1; + + return 0; +-- +2.7.4 + diff --git a/vlib/vlib/dpdk_buffer.c b/vlib/vlib/dpdk_buffer.c index 7e6e8424..84bca0f5 100644 --- a/vlib/vlib/dpdk_buffer.c +++ b/vlib/vlib/dpdk_buffer.c @@ -62,6 +62,7 @@ #include #include #include +#include #include @@ -989,7 +990,11 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, if (rmp) { new_start = pointer_to_uword (rmp); +#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) + new_size = (uintptr_t)STAILQ_FIRST(&rmp->mem_list)->addr + STAILQ_FIRST(&rmp->mem_list)->len - new_start; +#else new_size = rmp->elt_va_end - new_start; +#endif if (vpm->virtual.size > 0) { diff --git a/vnet/vnet/devices/dpdk/cli.c b/vnet/vnet/devices/dpdk/cli.c index bcc3d2f9..3b0befd5 100644 --- a/vnet/vnet/devices/dpdk/cli.c +++ b/vnet/vnet/devices/dpdk/cli.c @@ -167,8 +167,13 @@ show_dpdk_buffer (vlib_main_t * vm, unformat_input_t * input, rmp = vm->buffer_main->pktmbuf_pools[i]; if (rmp) { +#if 
RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) + unsigned count = rte_mempool_avail_count(rmp); + unsigned free_count = rte_mempool_in_use_count(rmp); +#else unsigned count = rte_mempool_count(rmp); unsigned free_count = rte_mempool_free_count(rmp); +#endif vlib_cli_output(vm, "name=\"%s\" available = %7d allocated = %7d total = %7d\n", rmp->name, (u32)count, (u32)free_count, diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c index 27e35229..66d4b6d4 100644 --- a/vnet/vnet/devices/dpdk/device.c +++ b/vnet/vnet/devices/dpdk/device.c @@ -355,6 +355,7 @@ u32 tx_burst_vector_internal (vlib_main_t * vm, n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0; } } +#if DPDK_VHOST_USER else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) { u32 offset = 0; @@ -450,6 +451,7 @@ u32 tx_burst_vector_internal (vlib_main_t * vm, if (xd->need_txlock) *xd->lockp[queue_id] = 0; } +#endif #if RTE_LIBRTE_KNI else if (xd->dev_type == VNET_DPDK_DEV_KNI) { @@ -878,6 +880,7 @@ dpdk_interface_tx (vlib_main_t * vm, static int dpdk_device_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance) { +#if DPDK_VHOST_USER dpdk_main_t * dm = &dpdk_main; dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance); @@ -888,6 +891,7 @@ static int dpdk_device_renumber (vnet_hw_interface_t * hi, } xd->vu_if_id = new_dev_instance; +#endif return 0; } @@ -926,6 +930,7 @@ static void dpdk_clear_hw_interface_counters (u32 instance) memset (&xd->last_stats, 0, sizeof (xd->last_stats)); } +#if DPDK_VHOST_USER if (PREDICT_FALSE(xd->dev_type == VNET_DPDK_DEV_VHOST_USER)) { int i; for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) { @@ -933,6 +938,7 @@ static void dpdk_clear_hw_interface_counters (u32 instance) xd->vu_intf->vrings[i].bytes = 0; } } +#endif } #ifdef RTE_LIBRTE_KNI @@ -1027,6 +1033,7 @@ dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) return 0; } #endif +#if DPDK_VHOST_USER if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) { if (is_up) @@ -1045,6 +1052,7 @@ dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) return 0; } +#endif if (is_up) diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h index 00caeafa..830bf102 100644 --- a/vnet/vnet/devices/dpdk/dpdk.h +++ b/vnet/vnet/devices/dpdk/dpdk.h @@ -61,6 +61,12 @@ #define always_inline static inline __attribute__ ((__always_inline__)) #endif +#if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 0) +#define DPDK_VHOST_USER 1 +#else +#define DPDK_VHOST_USER 0 +#endif + #include #define NB_MBUF (16<<10) @@ -131,6 +137,7 @@ typedef struct { u32 total_packet_cnt; } dpdk_efd_agent_t; +#if DPDK_VHOST_USER typedef struct { int callfd; int kickfd; @@ -160,6 +167,7 @@ typedef struct { u32 region_fd[VHOST_MEMORY_MAX_NREGIONS]; u64 region_offset[VHOST_MEMORY_MAX_NREGIONS]; } dpdk_vu_intf_t; +#endif typedef void (*dpdk_flowcontrol_callback_t) (vlib_main_t *vm, u32 hw_if_index, @@ -225,11 +233,13 @@ typedef struct { struct rte_kni *kni; u8 kni_port_id; +#if DPDK_VHOST_USER /* vhost-user related */ u32 vu_if_id; struct virtio_net vu_vhost_dev; u32 vu_is_running; dpdk_vu_intf_t *vu_intf; +#endif /* af_packet */ u8 af_packet_port_id; @@ -240,8 +250,13 @@ typedef struct { struct rte_eth_stats stats; struct rte_eth_stats last_stats; struct rte_eth_stats last_cleared_stats; +#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) + struct rte_eth_xstat * xstats; + struct rte_eth_xstat * last_cleared_xstats; +#else struct rte_eth_xstats * xstats; struct rte_eth_xstats * last_cleared_xstats; +#endif f64 
time_last_stats_update; dpdk_port_type_t port_type; @@ -509,10 +524,12 @@ u32 is_efd_discardable(vlib_thread_main_t *tm, vlib_buffer_t * b0, struct rte_mbuf *mb); +#if DPDK_VHOST_USER /* dpdk vhost-user interrupt management */ u8 dpdk_vhost_user_want_interrupt (dpdk_device_t *xd, int idx); void dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd, int idx); +#endif static inline u64 vnet_get_aggregate_rx_packets (void) @@ -544,6 +561,7 @@ void efd_config(u32 enabled, void post_sw_interface_set_flags (vlib_main_t *vm, u32 sw_if_index, u32 flags); +#if DPDK_VHOST_USER typedef struct vhost_user_memory vhost_user_memory_t; void dpdk_vhost_user_process_init (void **ctx); @@ -568,6 +586,7 @@ int dpdk_vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index); int dpdk_vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm, vhost_user_intf_details_t **out_vuids); +#endif u32 dpdk_get_admin_up_down_in_progress (void); diff --git a/vnet/vnet/devices/dpdk/dpdk_priv.h b/vnet/vnet/devices/dpdk/dpdk_priv.h index c6be7e7f..ac499432 100644 --- a/vnet/vnet/devices/dpdk/dpdk_priv.h +++ b/vnet/vnet/devices/dpdk/dpdk_priv.h @@ -23,6 +23,15 @@ #define DPDK_NB_TX_DESC_40GE 1024 #define DPDK_NB_RX_DESC_ENIC 1024 +#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_VF 0x154C +#endif + /* These args appear by themselves */ #define foreach_eal_double_hyphen_predicate_arg \ _(no-shconf) \ @@ -72,6 +81,7 @@ dpdk_rx_burst ( dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id) break; } } +#if DPDK_VHOST_USER else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) { vlib_main_t * vm = vlib_get_main(); @@ -129,6 +139,7 @@ dpdk_rx_burst ( dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id) } } +#endif #ifdef RTE_LIBRTE_KNI else if (xd->dev_type == VNET_DPDK_DEV_KNI) { diff --git a/vnet/vnet/devices/dpdk/format.c b/vnet/vnet/devices/dpdk/format.c index 25591c2e..4c553655 100644 --- a/vnet/vnet/devices/dpdk/format.c +++ b/vnet/vnet/devices/dpdk/format.c @@ -28,16 +28,11 @@ _ (tx_frames_ok, opackets) \ _ (tx_bytes_ok, obytes) \ _ (tx_errors, oerrors) \ - _ (tx_loopback_frames_ok, olbpackets) \ - _ (tx_loopback_bytes_ok, olbbytes) \ _ (rx_frames_ok, ipackets) \ _ (rx_bytes_ok, ibytes) \ _ (rx_errors, ierrors) \ _ (rx_missed, imissed) \ - _ (rx_multicast_frames_ok, imcasts) \ - _ (rx_no_bufs, rx_nombuf) \ - _ (rx_loopback_frames_ok, ilbpackets) \ - _ (rx_loopback_bytes_ok, ilbbytes) + _ (rx_no_bufs, rx_nombuf) #define foreach_dpdk_q_counter \ _ (rx_frames_ok, q_ipackets) \ @@ -174,9 +169,11 @@ u8 * format_dpdk_device_name (u8 * s, va_list * args) return format(s, "kni%d", dm->devices[i].kni_port_id); } else #endif +#if DPDK_VHOST_USER if (dm->devices[i].dev_type == VNET_DPDK_DEV_VHOST_USER) { return format(s, "VirtualEthernet0/0/%d", dm->devices[i].vu_if_id); } +#endif switch (dm->devices[i].port_type) { case VNET_DPDK_PORT_TYPE_ETH_1G: @@ -463,7 +460,6 @@ u8 * format_dpdk_device (u8 * s, va_list * args) format_white_space, indent + 2, xd->cpu_socket); /* $$$ MIB counters */ - { #define _(N, V) \ if ((xd->stats.V - xd->last_cleared_stats.V) != 0) { \ @@ -479,21 +475,33 @@ u8 * format_dpdk_device (u8 * s, va_list * args) u8 * xs = 0; u32 i = 0; +#if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 0) + struct rte_eth_xstats * xstat, * last_xstat; +#else + struct rte_eth_xstat * xstat, * last_xstat; + struct 
rte_eth_xstat_name * xstat_names = 0; + int len = rte_eth_xstats_get_names (xd->device_index, NULL, 0); + vec_validate (xstat_names, len - 1); + rte_eth_xstats_get_names (xd->device_index, xstat_names, len); +#endif ASSERT(vec_len(xd->xstats) == vec_len(xd->last_cleared_xstats)); vec_foreach_index(i, xd->xstats) { u64 delta = 0; - struct rte_eth_xstats* xstat = vec_elt_at_index(xd->xstats, i); - struct rte_eth_xstats* last_xstat = - vec_elt_at_index(xd->last_cleared_xstats, i); + xstat = vec_elt_at_index(xd->xstats, i); + last_xstat = vec_elt_at_index(xd->last_cleared_xstats, i); delta = xstat->value - last_xstat->value; if (verbose == 2 || (verbose && delta)) { /* format_c_identifier doesn't like c strings inside vector */ +#if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 0) u8 * name = format(0,"%s", xstat->name); +#else + u8 * name = format(0,"%s", xstat_names[i].name); +#endif xs = format(xs, "\n%U%-38U%16Ld", format_white_space, indent + 4, format_c_identifier, name, delta); @@ -501,6 +509,11 @@ u8 * format_dpdk_device (u8 * s, va_list * args) } } +#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) + vec_free (xstat_names); +#endif + +#if DPDK_VHOST_USER if (verbose && xd->dev_type == VNET_DPDK_DEV_VHOST_USER) { int i; for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) { @@ -528,6 +541,7 @@ u8 * format_dpdk_device (u8 * s, va_list * args) } } } +#endif if (xs) { diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index 178ff31b..816b20b3 100644 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -1539,7 +1539,9 @@ dpdk_process (vlib_main_t * vm, ethernet_main_t * em = ðernet_main; dpdk_device_t * xd; vlib_thread_main_t * tm = vlib_get_thread_main(); +#if DPDK_VHOST_USER void *vu_state; +#endif int i; error = dpdk_lib_init (dm); @@ -1564,7 +1566,9 @@ dpdk_process (vlib_main_t * vm, if (error) clib_error_report (error); +#if DPDK_VHOST_USER dpdk_vhost_user_process_init(&vu_state); +#endif tm->worker_thread_release = 1; @@ -1654,13 +1658,17 @@ dpdk_process (vlib_main_t * vm, if ((now - xd->time_last_link_update) >= dm->link_state_poll_interval) dpdk_update_link_state (xd, now); +#if DPDK_VHOST_USER if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) if (dpdk_vhost_user_process_if(vm, xd, vu_state) != 0) continue; +#endif } } +#if DPDK_VHOST_USER dpdk_vhost_user_process_cleanup(vu_state); +#endif return 0; } diff --git a/vnet/vnet/devices/dpdk/vhost_user.c b/vnet/vnet/devices/dpdk/vhost_user.c index 2d9462f0..212313a7 100644 --- a/vnet/vnet/devices/dpdk/vhost_user.c +++ b/vnet/vnet/devices/dpdk/vhost_user.c @@ -39,6 +39,8 @@ #define DBG_SOCK(args...) 
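/*
 * Illustration only, not part of the patch series: DPDK 16.07 splits xstats
 * values (struct rte_eth_xstat) from their names (struct rte_eth_xstat_name),
 * so callers query the count first and then fetch both arrays -- the same
 * two-call rte_eth_xstats_get_names() pattern the format.c hunk above uses
 * with vec_validate().  A minimal sketch assuming a DPDK 16.07 environment;
 * error handling is reduced to early returns.
 */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
example_dump_xstats(uint8_t port_id)
{
	int len = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (len <= 0)
		return;

	struct rte_eth_xstat_name *names = calloc(len, sizeof(*names));
	struct rte_eth_xstat *vals = calloc(len, sizeof(*vals));
	if (!names || !vals)
		goto done;

	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
	    rte_eth_xstats_get(port_id, vals, len) != len)
		goto done;

	for (int i = 0; i < len; i++)
		printf("%s = %" PRIu64 "\n", names[i].name, vals[i].value);

done:
	free(names);
	free(vals);
}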
#endif +#if DPDK_VHOST_USER + static const char *vhost_message_str[] __attribute__((unused)) = { [VHOST_USER_NONE] = "VHOST_USER_NONE", [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES", @@ -1917,3 +1919,4 @@ VLIB_CLI_COMMAND (show_vhost_user_command, static) = { .short_help = "show vhost-user interface", .function = show_dpdk_vhost_user_command_fn, }; +#endif diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c index 6433664b..b47dc9e8 100644 --- a/vpp/vpp-api/api.c +++ b/vpp/vpp-api/api.c @@ -3791,9 +3791,9 @@ vl_api_l2_interface_vlan_tag_rewrite_t_handler (vl_api_l2_interface_vlan_tag_rew static void vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t *mp) { -#if DPDK > 0 int rv = 0; vl_api_create_vhost_user_if_reply_t * rmp; +#if DPDK > 0 && DPDK_VHOST_USER u32 sw_if_index = (u32)~0; vnet_main_t * vnm = vnet_get_main(); @@ -3808,15 +3808,18 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t *mp) ({ rmp->sw_if_index = ntohl (sw_if_index); })); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; + REPLY_MACRO(VL_API_CREATE_VHOST_USER_IF_REPLY); #endif } static void vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t *mp) { -#if DPDK > 0 int rv = 0; vl_api_modify_vhost_user_if_reply_t * rmp; +#if DPDK > 0 && DPDK_VHOST_USER u32 sw_if_index = ntohl(mp->sw_if_index); vnet_main_t * vnm = vnet_get_main(); @@ -3825,18 +3828,19 @@ vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t *mp) rv = dpdk_vhost_user_modify_if(vnm, vm, (char *)mp->sock_filename, mp->is_server, sw_if_index, (u64)~0, mp->renumber, ntohl(mp->custom_dev_instance)); - - REPLY_MACRO(VL_API_MODIFY_VHOST_USER_IF_REPLY); +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; #endif + REPLY_MACRO(VL_API_MODIFY_VHOST_USER_IF_REPLY); } static void vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t *mp) { -#if DPDK > 0 int rv = 0; - vpe_api_main_t * vam = &vpe_api_main; vl_api_delete_vhost_user_if_reply_t * rmp; +#if DPDK > 0 && DPDK_VHOST_USER + vpe_api_main_t * vam = &vpe_api_main; u32 sw_if_index = ntohl(mp->sw_if_index); vnet_main_t * vnm = vnet_get_main(); @@ -3853,6 +3857,9 @@ vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t *mp) send_sw_interface_flags_deleted (vam, q, sw_if_index); } +#else + rv = VNET_API_ERROR_UNIMPLEMENTED; + REPLY_MACRO(VL_API_DELETE_VHOST_USER_IF_REPLY); #endif } @@ -3862,7 +3869,7 @@ static void vl_api_sw_interface_vhost_user_details_t_handler ( clib_warning ("BUG"); } -#if DPDK > 0 +#if DPDK > 0 && DPDK_VHOST_USER static void send_sw_interface_vhost_user_details (vpe_api_main_t * am, unix_shared_memory_queue_t *q, vhost_user_intf_details_t * vui, @@ -3894,7 +3901,7 @@ static void vl_api_sw_interface_vhost_user_dump_t_handler ( vl_api_sw_interface_vhost_user_dump_t *mp) { -#if DPDK > 0 +#if DPDK > 0 && DPDK_VHOST_USER int rv = 0; vpe_api_main_t * am = &vpe_api_main; vnet_main_t * vnm = vnet_get_main(); -- cgit 1.2.3-korg From 734d291c2e587b4760984cf75a50912c69106bdf Mon Sep 17 00:00:00 2001 From: Sachin Date: Tue, 2 Aug 2016 11:35:48 +0530 Subject: VPP: Fixed dpdk-16.07 BAD checksum error - Updated tarball name and checksum value Change-Id: If06581285e506859bd1edb516b690e963c7de5d0 Signed-off-by: Sachin --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 6a38ef10..576b1ee6 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -30,7 +30,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := 
$(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_2.2.0_TARBALL_MD5_CKSUM := bfa10283b49d56264c82519d130458cc DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306 -DPDK_16.07-rc3_TARBALL_MD5_CKSUM := fd59b0c2ab13c6cc7b7c22c2108b33c9 +DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifneq (,$(findstring clang,$(CC))) -- cgit 1.2.3-korg From 3389bbcad02e6d1824742be9fcbb74abdefa3ce5 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 2 Aug 2016 13:31:31 -0400 Subject: VPP-180 Clean up multi-socket / multi-chunk mempool discovery Change the default DPDK version to 16.07, and rename the indicated patch directory. Use the native vhost-user driver. Change-Id: Ie3d17e90e363ce86f0233b58c152de683b5d9456 Signed-off-by: Dave Barach --- dpdk/Makefile | 3 +- ...Add-packet_type-metadata-in-the-i40e-vPMD.patch | 1210 -------------------- ...0e-Enable-bad-checksum-flags-in-i40e-vPMD.patch | 111 -- ...Add-packet_type-metadata-in-the-i40e-vPMD.patch | 1210 ++++++++++++++++++++ ...0e-Enable-bad-checksum-flags-in-i40e-vPMD.patch | 111 ++ vlib/vlib/dpdk_buffer.c | 78 +- vpp/vpp-api/api.c | 4 +- 7 files changed, 1397 insertions(+), 1330 deletions(-) delete mode 100644 dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch delete mode 100644 dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch create mode 100644 dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch create mode 100644 dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 576b1ee6..d73ddb62 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -24,11 +24,10 @@ DPDK_DEBUG ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 16.04 +DPDK_VERSION ?= 16.07 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_2.2.0_TARBALL_MD5_CKSUM := bfa10283b49d56264c82519d130458cc DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306 DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) diff --git a/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch deleted file mode 100644 index 74d9416e..00000000 --- a/dpdk/dpdk-16.07-rc3_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch +++ /dev/null @@ -1,1210 +0,0 @@ -From 79a2ddaf4d7df7172faa54716ae7647ad7a549b9 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:01 -0700 -Subject: [PATCH 1/2] i40e: Add packet_type metadata in the i40e vPMD - -The ptype is decoded from the rx descriptor and stored -in the packet type field in the mbuf using the same function -as the non-vector driver. 
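/*
 * Illustration only, not part of the patch series: with the DPDK 16.07
 * mempool rework a pool's backing memory is a list of chunks (rmp->mem_list)
 * rather than the old elt_va_start/elt_va_end pair, which is why the
 * dpdk_buffer.c hunk earlier in this series reads STAILQ_FIRST(&rmp->mem_list).
 * A sketch of walking every chunk with rte_mempool_mem_iter() to find the
 * overall virtual-address span of a possibly multi-chunk pool; assumes a
 * DPDK 16.07 environment.
 */
#include <stdint.h>
#include <stddef.h>
#include <rte_mempool.h>

struct example_span {
	uintptr_t lo;
	uintptr_t hi;
};

static void
example_chunk_cb(struct rte_mempool *mp, void *opaque,
		 struct rte_mempool_memhdr *memhdr, unsigned mem_idx)
{
	struct example_span *span = opaque;
	uintptr_t start = (uintptr_t) memhdr->addr;
	uintptr_t end = start + memhdr->len;

	(void) mp;
	(void) mem_idx;
	if (span->lo == 0 || start < span->lo)
		span->lo = start;
	if (end > span->hi)
		span->hi = end;
}

/* Returns the number of bytes spanned by all chunks of the pool. */
static size_t
example_mempool_span(struct rte_mempool *mp)
{
	struct example_span span = { 0, 0 };

	rte_mempool_mem_iter(mp, example_chunk_cb, &span);
	return (size_t) (span.hi - span.lo);
}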
- -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx.c | 566 +-------------------------------------- - drivers/net/i40e/i40e_rxtx.h | 563 ++++++++++++++++++++++++++++++++++++++ - drivers/net/i40e/i40e_rxtx_vec.c | 16 ++ - 3 files changed, 581 insertions(+), 564 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c -index d3cfb98..2903347 100644 ---- a/drivers/net/i40e/i40e_rxtx.c -+++ b/drivers/net/i40e/i40e_rxtx.c -@@ -174,569 +174,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) - } - #endif - --/* For each value it means, datasheet of hardware can tell more details -- * -- * @note: fix i40e_dev_supported_ptypes_get() if any change here. -- */ --static inline uint32_t --i40e_rxd_pkt_type_mapping(uint8_t ptype) --{ -- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -- /* L2 types */ -- /* [0] reserved */ -- [1] = RTE_PTYPE_L2_ETHER, -- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -- /* [3] - [5] reserved */ -- [6] = RTE_PTYPE_L2_ETHER_LLDP, -- /* [7] - [10] reserved */ -- [11] = RTE_PTYPE_L2_ETHER_ARP, -- /* [12] - [21] reserved */ -- -- /* Non tunneled IPv4 */ -- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [25] reserved */ -- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv4 --> IPv4 */ -- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [32] reserved */ -- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> IPv6 */ -- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [39] reserved */ -- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [42] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN */ -- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [47] reserved */ -- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [54] reserved */ -- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [62] reserved */ -- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [65] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [69] reserved */ -- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [77] reserved */ -- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [84] reserved */ -- [85] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* Non tunneled IPv6 */ -- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [91] reserved */ -- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv6 --> IPv4 */ -- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [98] reserved */ -- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> IPv6 */ -- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [105] reserved */ -- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN */ -- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [113] reserved */ -- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ -- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [120] reserved */ -- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [128] reserved */ -- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [135] reserved */ -- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [143] reserved */ -- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [150] reserved */ -- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- 
RTE_PTYPE_INNER_L4_ICMP, -- -- /* L2 NSH packet type */ -- [154] = RTE_PTYPE_L2_ETHER_NSH, -- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* All others reserved */ -- }; -- -- return type_table[ptype]; --} -- - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 -@@ -2136,7 +1573,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) - #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC - dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || - #endif -- dev->rx_pkt_burst == i40e_recv_scattered_pkts) -+ dev->rx_pkt_burst == i40e_recv_scattered_pkts || -+ dev->rx_pkt_burst == i40e_recv_pkts_vec) - return ptypes; - return NULL; - } -diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h -index 98179f0..ecdb13c 100644 ---- a/drivers/net/i40e/i40e_rxtx.h -+++ b/drivers/net/i40e/i40e_rxtx.h -@@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, - struct i40e_tx_queue *txq); - void i40e_set_tx_function(struct rte_eth_dev *dev); - -+/* For each value it means, datasheet of hardware can tell more details -+ * -+ * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
-+ */ -+static inline uint32_t -+i40e_rxd_pkt_type_mapping(uint8_t ptype) -+{ -+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -+ /* L2 types */ -+ /* [0] reserved */ -+ [1] = RTE_PTYPE_L2_ETHER, -+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -+ /* [3] - [5] reserved */ -+ [6] = RTE_PTYPE_L2_ETHER_LLDP, -+ /* [7] - [10] reserved */ -+ [11] = RTE_PTYPE_L2_ETHER_ARP, -+ /* [12] - [21] reserved */ -+ -+ /* Non tunneled IPv4 */ -+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [25] reserved */ -+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv4 --> IPv4 */ -+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [32] reserved */ -+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> IPv6 */ -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [39] reserved */ -+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN */ -+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT 
| -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [47] reserved */ -+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [54] reserved */ -+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [62] reserved */ -+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [69] reserved */ -+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [77] reserved */ -+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [84] reserved */ -+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* Non tunneled IPv6 */ -+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ 
[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [91] reserved */ -+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv6 --> IPv4 */ -+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [98] reserved */ -+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> IPv6 */ -+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [105] reserved */ -+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN */ -+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [113] reserved */ -+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> 
GRE/Teredo/VXLAN --> IPv6 */ -+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [120] reserved */ -+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [128] reserved */ -+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [135] reserved */ -+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [143] reserved */ -+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [150] reserved */ -+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* L2 NSH packet type */ -+ [154] = RTE_PTYPE_L2_ETHER_NSH, -+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ 
RTE_PTYPE_L4_NONFRAG, -+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* All others reserved */ -+ }; -+ -+ return type_table[ptype]; -+} -+ - #endif /* _I40E_RXTX_H_ */ -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index 05cb415..e78ac63 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -187,6 +187,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - - #define PKTLEN_SHIFT 10 - -+static inline void -+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) -+{ -+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); -+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); -+ -+ ptype0 = _mm_srli_epi64(ptype0, 30); -+ ptype1 = _mm_srli_epi64(ptype1, 30); -+ -+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0)); -+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8)); -+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0)); -+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8)); -+} -+ - /* - * Notice: - * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -@@ -393,6 +408,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, - pkt_mb2); - _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, - pkt_mb1); -+ desc_to_ptype_v(descs, &rx_pkts[pos]); - /* C.4 calc avaialbe number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); - nb_pkts_recd += var; --- -2.7.4 - diff --git a/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch deleted file mode 100644 index 58256f19..00000000 --- a/dpdk/dpdk-16.07-rc3_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 5917bd1cf9857979a7cae89f362d2c885f09d034 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:02 -0700 -Subject: [PATCH 2/2] i40e: Enable bad checksum flags in i40e vPMD - -Decode the checksum flags from the rx descriptor, setting -the appropriate bit in the mbuf ol_flags field when the flag -indicates a bad checksum. - -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx_vec.c | 48 +++++++++++++++++++++++----------------- - 1 file changed, 28 insertions(+), 20 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index e78ac63..ace51df 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -138,19 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) - static inline void - desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - { -- __m128i vlan0, vlan1, rss; -- union { -- uint16_t e[4]; -- uint64_t dword; -- } vol; -+ __m128i vlan0, vlan1, rss, l3_l4e; - - /* mask everything except RSS, flow director and VLAN flags - * bit2 is for VLAN tag, bit11 for flow director indication - * bit13:12 for RSS indication. 
- */ -- const __m128i rss_vlan_msk = _mm_set_epi16( -- 0x0000, 0x0000, 0x0000, 0x0000, -- 0x3804, 0x3804, 0x3804, 0x3804); -+ const __m128i rss_vlan_msk = _mm_set_epi32( -+ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004); - - /* map rss and vlan type to rss hash and vlan flag */ - const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, -@@ -163,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, - 0, 0, PKT_RX_FDIR, 0); - -- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); -- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); -- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); -+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_IP_CKSUM_BAD, -+ 0); -+ -+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); -+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); -+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); - - vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); - vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); - -- rss = _mm_srli_epi16(vlan1, 11); -+ rss = _mm_srli_epi32(vlan1, 12); - rss = _mm_shuffle_epi8(rss_flags, rss); - -+ l3_l4e = _mm_srli_epi32(vlan1, 22); -+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); -+ - vlan0 = _mm_or_si128(vlan0, rss); -- vol.dword = _mm_cvtsi128_si64(vlan0); -+ vlan0 = _mm_or_si128(vlan0, l3_l4e); - -- rx_pkts[0]->ol_flags = vol.e[0]; -- rx_pkts[1]->ol_flags = vol.e[1]; -- rx_pkts[2]->ol_flags = vol.e[2]; -- rx_pkts[3]->ol_flags = vol.e[3]; -+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0); -+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2); -+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4); -+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6); - } - #else - #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) -@@ -754,7 +762,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE - /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_strip != 0 || -- rxmode->hw_vlan_extend != 0) -+ rxmode->hw_vlan_extend != 0 || -+ rxmode->hw_ip_checksum != 0) - return -1; - #endif - -@@ -765,8 +774,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - /* - no csum error report support - * - no header split support - */ -- if (rxmode->hw_ip_checksum == 1 || -- rxmode->header_split == 1) -+ if (rxmode->header_split == 1) - return -1; - - return 0; --- -2.7.4 - diff --git a/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch new file mode 100644 index 00000000..74d9416e --- /dev/null +++ b/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch @@ -0,0 +1,1210 @@ +From 79a2ddaf4d7df7172faa54716ae7647ad7a549b9 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Thu, 14 Jul 2016 09:59:01 -0700 +Subject: [PATCH 1/2] i40e: Add packet_type metadata in the i40e vPMD + +The ptype is decoded from the rx descriptor and stored +in the packet type field in the mbuf using the same function +as the non-vector driver. 
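For orientation, a minimal scalar sketch of the lookup that the vectorized desc_to_ptype_v() added by this patch performs four descriptors at a time: the shift-by-30 and low-byte extract mirror the _mm_srli_epi64(ptype0, 30) / _mm_extract_epi8(ptype0, 0) sequence in the patch, and the table lookup is the i40e_rxd_pkt_type_mapping() helper the patch moves from i40e_rxtx.c into i40e_rxtx.h. The EXAMPLE_* constants and the standalone helper name are illustrative assumptions, not part of the driver.

#include <stdint.h>
/* assumes the driver-internal "i40e_rxtx.h" added below, which declares
 * i40e_rxd_pkt_type_mapping() */

#define EXAMPLE_RXD_QW1_PTYPE_SHIFT 30      /* mirrors _mm_srli_epi64(ptype0, 30) */
#define EXAMPLE_RXD_QW1_PTYPE_MASK  0xFFull /* mirrors the low-byte _mm_extract_epi8() */

/* Scalar equivalent for a single descriptor: qword1 is the second 64-bit
 * word of the RX descriptor, which carries the 8-bit hardware ptype index. */
static inline uint32_t
example_rxd_to_packet_type(uint64_t qword1)
{
	uint8_t hw_ptype = (uint8_t)((qword1 >> EXAMPLE_RXD_QW1_PTYPE_SHIFT) &
				     EXAMPLE_RXD_QW1_PTYPE_MASK);

	/* 256-entry RTE_PTYPE_* translation table added to i40e_rxtx.h */
	return i40e_rxd_pkt_type_mapping(hw_ptype);
}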
+ +Signed-off-by: Damjan Marion +Signed-off-by: Jeff Shaw +--- + drivers/net/i40e/i40e_rxtx.c | 566 +-------------------------------------- + drivers/net/i40e/i40e_rxtx.h | 563 ++++++++++++++++++++++++++++++++++++++ + drivers/net/i40e/i40e_rxtx_vec.c | 16 ++ + 3 files changed, 581 insertions(+), 564 deletions(-) + +diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c +index d3cfb98..2903347 100644 +--- a/drivers/net/i40e/i40e_rxtx.c ++++ b/drivers/net/i40e/i40e_rxtx.c +@@ -174,569 +174,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) + } + #endif + +-/* For each value it means, datasheet of hardware can tell more details +- * +- * @note: fix i40e_dev_supported_ptypes_get() if any change here. +- */ +-static inline uint32_t +-i40e_rxd_pkt_type_mapping(uint8_t ptype) +-{ +- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { +- /* L2 types */ +- /* [0] reserved */ +- [1] = RTE_PTYPE_L2_ETHER, +- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, +- /* [3] - [5] reserved */ +- [6] = RTE_PTYPE_L2_ETHER_LLDP, +- /* [7] - [10] reserved */ +- [11] = RTE_PTYPE_L2_ETHER_ARP, +- /* [12] - [21] reserved */ +- +- /* Non tunneled IPv4 */ +- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- /* [25] reserved */ +- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* IPv4 --> IPv4 */ +- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [32] reserved */ +- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> IPv6 */ +- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [39] reserved */ +- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [42] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN */ +- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ +- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [47] reserved */ +- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ +- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [54] reserved */ +- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ +- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ +- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [62] reserved */ +- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [65] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ +- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [69] reserved */ +- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ +- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ +- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [77] reserved */ +- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ +- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [84] reserved */ +- [85] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* Non tunneled IPv6 */ +- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- /* [91] reserved */ +- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* IPv6 --> IPv4 */ +- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [98] reserved */ +- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> IPv6 */ +- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [105] reserved */ +- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_IP | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN */ +- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ +- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [113] reserved */ +- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ +- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [120] reserved */ +- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ +- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ +- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [128] reserved */ +- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ +- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [135] reserved */ +- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ +- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ +- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [143] reserved */ +- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_ICMP, +- +- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ +- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_FRAG, +- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_NONFRAG, +- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_UDP, +- /* [150] reserved */ +- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_TCP, +- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_INNER_L4_SCTP, +- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_TUNNEL_GRENAT | +- RTE_PTYPE_INNER_L2_ETHER_VLAN | +- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | +- 
RTE_PTYPE_INNER_L4_ICMP, +- +- /* L2 NSH packet type */ +- [154] = RTE_PTYPE_L2_ETHER_NSH, +- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_FRAG, +- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_NONFRAG, +- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_UDP, +- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_TCP, +- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_SCTP, +- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | +- RTE_PTYPE_L4_ICMP, +- +- /* All others reserved */ +- }; +- +- return type_table[ptype]; +-} +- + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 + #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 +@@ -2136,7 +1573,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) + #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || + #endif +- dev->rx_pkt_burst == i40e_recv_scattered_pkts) ++ dev->rx_pkt_burst == i40e_recv_scattered_pkts || ++ dev->rx_pkt_burst == i40e_recv_pkts_vec) + return ptypes; + return NULL; + } +diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h +index 98179f0..ecdb13c 100644 +--- a/drivers/net/i40e/i40e_rxtx.h ++++ b/drivers/net/i40e/i40e_rxtx.h +@@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq); + void i40e_set_tx_function(struct rte_eth_dev *dev); + ++/* For each value it means, datasheet of hardware can tell more details ++ * ++ * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
++ */ ++static inline uint32_t ++i40e_rxd_pkt_type_mapping(uint8_t ptype) ++{ ++ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { ++ /* L2 types */ ++ /* [0] reserved */ ++ [1] = RTE_PTYPE_L2_ETHER, ++ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, ++ /* [3] - [5] reserved */ ++ [6] = RTE_PTYPE_L2_ETHER_LLDP, ++ /* [7] - [10] reserved */ ++ [11] = RTE_PTYPE_L2_ETHER_ARP, ++ /* [12] - [21] reserved */ ++ ++ /* Non tunneled IPv4 */ ++ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ /* [25] reserved */ ++ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ ++ /* IPv4 --> IPv4 */ ++ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [32] reserved */ ++ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> IPv6 */ ++ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [39] reserved */ ++ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN */ ++ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ ++ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT 
| ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [47] reserved */ ++ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ ++ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [54] reserved */ ++ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ ++ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ ++ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [62] reserved */ ++ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ ++ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [69] reserved */ ++ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ ++ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ ++ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [77] reserved */ ++ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ ++ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [84] reserved */ ++ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* Non tunneled IPv6 */ ++ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ 
[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ /* [91] reserved */ ++ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ ++ /* IPv6 --> IPv4 */ ++ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [98] reserved */ ++ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> IPv6 */ ++ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [105] reserved */ ++ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_IP | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN */ ++ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ ++ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [113] reserved */ ++ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> 
GRE/Teredo/VXLAN --> IPv6 */ ++ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [120] reserved */ ++ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ ++ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ ++ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [128] reserved */ ++ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ ++ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [135] reserved */ ++ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | ++ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ ++ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ ++ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [143] reserved */ ++ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ ++ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_FRAG, ++ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_NONFRAG, ++ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_UDP, ++ /* [150] reserved */ ++ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_TCP, ++ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_SCTP, ++ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_TUNNEL_GRENAT | ++ RTE_PTYPE_INNER_L2_ETHER_VLAN | ++ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_INNER_L4_ICMP, ++ ++ /* L2 NSH packet type */ ++ [154] = RTE_PTYPE_L2_ETHER_NSH, ++ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_NONFRAG, ++ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_UDP, ++ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_TCP, ++ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_SCTP, ++ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | ++ RTE_PTYPE_L4_ICMP, ++ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ RTE_PTYPE_L4_FRAG, ++ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | ++ 
RTE_PTYPE_L4_NONFRAG,
++ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++ RTE_PTYPE_L4_UDP,
++ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++ RTE_PTYPE_L4_TCP,
++ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++ RTE_PTYPE_L4_SCTP,
++ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
++ RTE_PTYPE_L4_ICMP,
++
++ /* All others reserved */
++ };
++
++ return type_table[ptype];
++}
++
+ #endif /* _I40E_RXTX_H_ */
+diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
+index 05cb415..e78ac63 100644
+--- a/drivers/net/i40e/i40e_rxtx_vec.c
++++ b/drivers/net/i40e/i40e_rxtx_vec.c
+@@ -187,6 +187,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+
+ #define PKTLEN_SHIFT 10
+
++static inline void
++desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
++{
++ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
++ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
++
++ ptype0 = _mm_srli_epi64(ptype0, 30);
++ ptype1 = _mm_srli_epi64(ptype1, 30);
++
++ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
++ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
++ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
++ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
++}
++
+ /*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+@@ -393,6 +408,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
++ desc_to_ptype_v(descs, &rx_pkts[pos]);
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+--
+2.7.4
+
diff --git a/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch
new file mode 100644
index 00000000..58256f19
--- /dev/null
+++ b/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch
@@ -0,0 +1,111 @@
+From 5917bd1cf9857979a7cae89f362d2c885f09d034 Mon Sep 17 00:00:00 2001
+From: Damjan Marion
+Date: Thu, 14 Jul 2016 09:59:02 -0700
+Subject: [PATCH 2/2] i40e: Enable bad checksum flags in i40e vPMD
+
+Decode the checksum flags from the rx descriptor, setting
+the appropriate bit in the mbuf ol_flags field when the flag
+indicates a bad checksum.
+
+Signed-off-by: Damjan Marion
+Signed-off-by: Jeff Shaw
+---
+ drivers/net/i40e/i40e_rxtx_vec.c | 48 +++++++++++++++++++++++-----------------
+ 1 file changed, 28 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
+index e78ac63..ace51df 100644
+--- a/drivers/net/i40e/i40e_rxtx_vec.c
++++ b/drivers/net/i40e/i40e_rxtx_vec.c
+@@ -138,19 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+ static inline void
+ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+ {
+- __m128i vlan0, vlan1, rss;
+- union {
+- uint16_t e[4];
+- uint64_t dword;
+- } vol;
++ __m128i vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */ +- const __m128i rss_vlan_msk = _mm_set_epi16( +- 0x0000, 0x0000, 0x0000, 0x0000, +- 0x3804, 0x3804, 0x3804, 0x3804); ++ const __m128i rss_vlan_msk = _mm_set_epi32( ++ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004); + + /* map rss and vlan type to rss hash and vlan flag */ + const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, +@@ -163,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + +- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); +- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); +- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); ++ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_EIP_CKSUM_BAD, ++ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, ++ PKT_RX_L4_CKSUM_BAD, ++ PKT_RX_IP_CKSUM_BAD, ++ 0); ++ ++ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); ++ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); ++ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); + + vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); + vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); + +- rss = _mm_srli_epi16(vlan1, 11); ++ rss = _mm_srli_epi32(vlan1, 12); + rss = _mm_shuffle_epi8(rss_flags, rss); + ++ l3_l4e = _mm_srli_epi32(vlan1, 22); ++ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); ++ + vlan0 = _mm_or_si128(vlan0, rss); +- vol.dword = _mm_cvtsi128_si64(vlan0); ++ vlan0 = _mm_or_si128(vlan0, l3_l4e); + +- rx_pkts[0]->ol_flags = vol.e[0]; +- rx_pkts[1]->ol_flags = vol.e[1]; +- rx_pkts[2]->ol_flags = vol.e[2]; +- rx_pkts[3]->ol_flags = vol.e[3]; ++ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0); ++ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2); ++ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4); ++ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6); + } + #else + #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) +@@ -754,7 +762,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) + #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->hw_vlan_strip != 0 || +- rxmode->hw_vlan_extend != 0) ++ rxmode->hw_vlan_extend != 0 || ++ rxmode->hw_ip_checksum != 0) + return -1; + #endif + +@@ -765,8 +774,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) + /* - no csum error report support + * - no header split support + */ +- if (rxmode->hw_ip_checksum == 1 || +- rxmode->header_split == 1) ++ if (rxmode->header_split == 1) + return -1; + + return 0; +-- +2.7.4 + diff --git a/vlib/vlib/dpdk_buffer.c b/vlib/vlib/dpdk_buffer.c index 84bca0f5..337fca9a 100644 --- a/vlib/vlib/dpdk_buffer.c +++ b/vlib/vlib/dpdk_buffer.c @@ -964,7 +964,9 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, vlib_buffer_main_t *bm = vm->buffer_main; vlib_physmem_main_t *vpm = &vm->physmem_main; struct rte_mempool *rmp; +#if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 0) uword new_start, new_size; +#endif int i; if (!rte_pktmbuf_pool_create) @@ -985,16 +987,78 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE, /* dataroom size */ socket_id); /* cpu socket */ - vec_free (pool_name); - if (rmp) { - new_start = pointer_to_uword (rmp); #if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0) - new_size = (uintptr_t)STAILQ_FIRST(&rmp->mem_list)->addr + STAILQ_FIRST(&rmp->mem_list)->len - new_start; + { + uword 
this_pool_end; + uword this_pool_start; + uword this_pool_size; + uword save_vpm_start, save_vpm_end, save_vpm_size; + struct rte_mempool_memhdr *memhdr; + + this_pool_start = ~0ULL; + this_pool_end = 0LL; + + STAILQ_FOREACH (memhdr, &rmp->mem_list, next) + { + if (((uword)(memhdr->addr + memhdr->len)) > this_pool_end) + this_pool_end = (uword)(memhdr->addr + memhdr->len); + if (((uword)memhdr->addr) < this_pool_start) + this_pool_start = (uword)(memhdr->addr); + } + ASSERT (this_pool_start < ~0ULL && this_pool_end > 0); + this_pool_size = this_pool_end - this_pool_start; + + if (CLIB_DEBUG > 1) + { + clib_warning ("%s: pool start %llx pool end %llx pool size %lld", + pool_name, this_pool_start, this_pool_end, + this_pool_size); + clib_warning + ("before: virtual.start %llx virtual.end %llx virtual.size %lld", + vpm->virtual.start, vpm->virtual.end, vpm->virtual.size); + } + + save_vpm_start = vpm->virtual.start; + save_vpm_end = vpm->virtual.end; + save_vpm_size = vpm->virtual.size; + + if ((this_pool_start < vpm->virtual.start) || vpm->virtual.start == 0) + vpm->virtual.start = this_pool_start; + if (this_pool_end > vpm->virtual.end) + vpm->virtual.end = this_pool_end; + + vpm->virtual.size = vpm->virtual.end - vpm->virtual.start; + + if (CLIB_DEBUG > 1) + { + clib_warning + ("after: virtual.start %llx virtual.end %llx virtual.size %lld", + vpm->virtual.start, vpm->virtual.end, vpm->virtual.size); + } + + /* check if fits into buffer index range */ + if ((u64) vpm->virtual.size > + ((u64) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES))) + { + clib_warning ("physmem: virtual size out of range!"); + vpm->virtual.start = save_vpm_start; + vpm->virtual.end = save_vpm_end; + vpm->virtual.size = save_vpm_size; + rmp = 0; + } + } + if (rmp) + { + bm->pktmbuf_pools[socket_id] = rmp; + vec_free(pool_name); + return 0; + } + } #else + new_start = pointer_to_uword (rmp); new_size = rmp->elt_va_end - new_start; -#endif if (vpm->virtual.size > 0) { @@ -1021,8 +1085,12 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, vpm->virtual.start = new_start; vpm->virtual.size = new_size; vpm->virtual.end = new_start + new_size; + vec_free(pool_name); return 0; } +#endif + + vec_free (pool_name); /* no usable pool for this socket, try to use pool from another one */ for (i = 0; i < vec_len (bm->pktmbuf_pools); i++) diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c index 2f076c0b..73ecbd72 100644 --- a/vpp/vpp-api/api.c +++ b/vpp/vpp-api/api.c @@ -3814,13 +3814,13 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t *mp) { int rv = 0; vl_api_create_vhost_user_if_reply_t * rmp; -#if DPDK > 0 && DPDK_VHOST_USER +#if DPDK > 0 u32 sw_if_index = (u32)~0; vnet_main_t * vnm = vnet_get_main(); vlib_main_t * vm = vlib_get_main(); - rv = dpdk_vhost_user_create_if(vnm, vm, (char *)mp->sock_filename, + rv = vhost_user_create_if(vnm, vm, (char *)mp->sock_filename, mp->is_server, &sw_if_index, (u64)~0, mp->renumber, ntohl(mp->custom_dev_instance), (mp->use_custom_mac)?mp->mac_address:NULL); -- cgit 1.2.3-korg From 39162275f65da6b9ce986099aa8fdc513efda9da Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 25 Oct 2016 14:54:38 +0200 Subject: dpdk: enable building with dpdk 16.11-rc1 Works with: http://dpdk.org/browse/dpdk/snapshot/dpdk-16.11-rc1.tar.xz placed into dpdk/ or ~/Downloads Change-Id: I17f6a721529dbefc796f555e2525d157b9bf8740 Signed-off-by: Damjan Marion --- dpdk/Makefile | 1 + vnet/vnet/devices/dpdk/dpdk.h | 20 ++++++++++++++++++++ vnet/vnet/devices/dpdk/init.c | 8 ++++++++ 3 files 
changed, 29 insertions(+)

(limited to 'dpdk/Makefile')

diff --git a/dpdk/Makefile b/dpdk/Makefile
index d73ddb62..97504252 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -30,6 +30,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
 DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
 DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306
 DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be
+DPDK_16.11-rc1_TARBALL_MD5_CKSUM := 69a13a554160622a0fdb87f52539073f
 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION)

 ifneq (,$(findstring clang,$(CC)))
diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h
index c2ea579a..e34d4b97 100644
--- a/vnet/vnet/devices/dpdk/dpdk.h
+++ b/vnet/vnet/devices/dpdk/dpdk.h
@@ -65,6 +65,25 @@ extern vnet_device_class_t dpdk_device_class;
 extern vlib_node_registration_t dpdk_input_node;
 extern vlib_node_registration_t handoff_dispatch_node;

+#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0)
+#define foreach_dpdk_pmd \
+ _ ("net_thunderx", THUNDERX) \
+ _ ("net_e1000_em", E1000EM) \
+ _ ("net_e1000_igb", IGB) \
+ _ ("net_e1000_igb_vf", IGBVF) \
+ _ ("net_ixgbe", IXGBE) \
+ _ ("net_ixgbe_vf", IXGBEVF) \
+ _ ("net_i40e", I40E) \
+ _ ("net_i40e_vf", I40EVF) \
+ _ ("net_virtio", VIRTIO) \
+ _ ("net_enic", ENIC) \
+ _ ("net_vmxnet3", VMXNET3) \
+ _ ("net_af_packet", AF_PACKET) \
+ _ ("net_bonding", BOND) \
+ _ ("net_fm10k", FM10K) \
+ _ ("net_cxgbe", CXGBE) \
+ _ ("net_dpaa2", DPAA2)
+#else
 #define foreach_dpdk_pmd \
 _ ("rte_nicvf_pmd", THUNDERX) \
 _ ("rte_em_pmd", E1000EM) \
@@ -82,6 +101,7 @@ extern vlib_node_registration_t handoff_dispatch_node;
 _ ("rte_pmd_fm10k", FM10K) \
 _ ("rte_cxgbe_pmd", CXGBE) \
 _ ("rte_dpaa2_dpni", DPAA2)
+#endif

 typedef enum
 {
diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c
index 7045e454..c57fcde3 100644
--- a/vnet/vnet/devices/dpdk/init.c
+++ b/vnet/vnet/devices/dpdk/init.c
@@ -431,7 +431,11 @@ dpdk_lib_init (dpdk_main_t * dm)
 /* workaround for drivers not setting driver_name */
 if ((!dev_info.driver_name) && (dev_info.pci_dev))
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
 dev_info.driver_name = dev_info.pci_dev->driver->name;
+#else
+ dev_info.driver_name = dev_info.pci_dev->driver->driver.name;
+#endif
 ASSERT (dev_info.driver_name);
 if (!xd->pmd)
@@ -1554,7 +1558,11 @@ dpdk_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
 struct rte_eth_dev_info dev_info;
 rte_eth_dev_info_get (i, &dev_info);
 if (!dev_info.driver_name)
+#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0)
 dev_info.driver_name = dev_info.pci_dev->driver->name;
+#else
+ dev_info.driver_name = dev_info.pci_dev->driver->driver.name;
+#endif
 ASSERT (dev_info.driver_name);
 if (strncmp (dev_info.driver_name, "rte_bond_pmd", 12) == 0)
 {
--
cgit 1.2.3-korg


From 66bcab2fff54531a126ddabe3b377df3fc536731 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Tue, 8 Nov 2016 20:31:11 +0100
Subject: dpdk: bump to 16.11-rc3 release

Tarball available at:
http://dpdk.org/browse/dpdk/snapshot/dpdk-16.11-rc3.tar.xz

Change-Id: Ib34f32206d866888c4b5bf6609b8ae53f570daaa
Signed-off-by: Damjan Marion
---
 dpdk/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'dpdk/Makefile')

diff --git a/dpdk/Makefile b/dpdk/Makefile
index 97504252..0f5c9326 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -30,7 +30,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
 DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
 DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306
 DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be
-DPDK_16.11-rc1_TARBALL_MD5_CKSUM := 69a13a554160622a0fdb87f52539073f
+DPDK_16.11-rc3_TARBALL_MD5_CKSUM := b5d924070ea9320abe7d2c7b443a6b63
 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION)

 ifneq (,$(findstring clang,$(CC)))
--
cgit 1.2.3-korg


From d4895d54158a409124e59304b1b523ec4c2adab7 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Mon, 14 Nov 2016 21:45:20 +0100
Subject: dpdk: add build support for DPDK 16.11 release

Change-Id: Icec79bfc5d786cd293520b1dcc6b8dccd1419acb
Signed-off-by: Damjan Marion
---
 dpdk/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'dpdk/Makefile')

diff --git a/dpdk/Makefile b/dpdk/Makefile
index 0f5c9326..76c1df5d 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -30,7 +30,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
 DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
 DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306
 DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be
-DPDK_16.11-rc3_TARBALL_MD5_CKSUM := b5d924070ea9320abe7d2c7b443a6b63
+DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9
 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION)

 ifneq (,$(findstring clang,$(CC)))
--
cgit 1.2.3-korg


From e0a2d1796a043001dcb97f9a2a870f7f9d82f44c Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Tue, 8 Nov 2016 20:39:21 +0100
Subject: dpdk: switch to 16.11

Change-Id: Icf9de5b89e5c2cda763e52d528fb70091860a754
Signed-off-by: Damjan Marion
---
 dpdk/Makefile | 2 +-
 dpdk/dkms/create_deb_manifest.sh | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

(limited to 'dpdk/Makefile')

diff --git a/dpdk/Makefile b/dpdk/Makefile
index 76c1df5d..5d072e59 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -24,7 +24,7 @@ DPDK_DEBUG ?= n

 B := $(DPDK_BUILD_DIR)
 I := $(DPDK_INSTALL_DIR)
-DPDK_VERSION ?= 16.07
+DPDK_VERSION ?= 16.11
 DPDK_BASE_URL ?= http://fast.dpdk.org/rel
 DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
 DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
diff --git a/dpdk/dkms/create_deb_manifest.sh b/dpdk/dkms/create_deb_manifest.sh
index e512850e..f8305588 100755
--- a/dpdk/dkms/create_deb_manifest.sh
+++ b/dpdk/dkms/create_deb_manifest.sh
@@ -9,7 +9,6 @@ SRC_DIR=/usr/src/vpp-dpdk-dkms-${VER}/
 cat > ${DEBIAN_DIR}/vpp-dpdk-dkms.install << _EOF_
 ${DPDK_ROOT}/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h ${SRC_DIR}
 ${DPDK_ROOT}/lib/librte_eal/common/include/rte_pci_dev_features.h ${SRC_DIR}
-${DPDK_ROOT}/lib/librte_eal/common/include/rte_pci_dev_ids.h ${SRC_DIR}
 ${DPDK_ROOT}/lib/librte_eal/linuxapp/igb_uio/igb_uio.c ${SRC_DIR}
 ${DPDK_ROOT}/lib/librte_eal/linuxapp/igb_uio/compat.h ${SRC_DIR}
 ../../dpdk/dkms/Makefile ${SRC_DIR}
--
cgit 1.2.3-korg


From f3ebeda1da8b0646af7be6a04696f72572112f90 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Wed, 23 Nov 2016 23:47:53 +0100
Subject: dpdk: remove old patches

Change-Id: I31244207ca5420558c6ff00b2021126ff5628e08
Signed-off-by: Damjan Marion
---
 dpdk/Makefile | 1 -
 ...1-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch | 75 -
 ...a-bit-longer-for-autonegotiation-to-leave.patch | 25 -
 ...Cleanup-virtio-pmd-debug-log-output-reset.patch | 65 -
 ...f-rearrange-rte_mbuf-metadata-to-suit-vpp.patch | 83 -
 ...low-applications-to-override-rte_delay_us.patch | 43 -
 ...mporarily-disable-unthrottled-log-message.patch | 26 -
 ...-in-igb_uio-driver-when-the-device-is-rem.patch | 36 -
 ...008-Add-missing-init-of-packet_type-field.patch | 70 -
 ...issed-to-count-drops-due-to-lack-of-RX-bu.patch | 32 -
 .../0010-Fix-O0-clang-build.patch | 32 -
...011-enic-fix-misalignment-of-Rx-mbuf-data.patch | 55 - ...zation-of-Tx-path-to-reduce-Host-CPU-over.patch | 1844 - ...vert-ixgbe-fix-packet-type-from-vector-Rx.patch | 128 - ...T_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch | 42 - .../0015-ENIC-counter-improvement.patch | 165 - dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch | 672 - .../0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch | 40404 ------------------- ...gfault-on-Tx-path-after-restarting-a-devi.patch | 46 - ...-queue-initialization-after-restarting-a-.patch | 37 - ...x-releasing-mbufs-when-tearing-down-Rx-qu.patch | 43 - ...-net-enic-fix-crash-when-releasing-queues.patch | 61 - ...c-improve-out-of-resources-error-handling.patch | 67 - .../0023-net-enic-fix-memory-freeing.patch | 238 - ...-enic-fix-Rx-scatter-with-multiple-queues.patch | 80 - .../0025-enic-fixup-of-Rx-Scatter-patch.patch | 169 - ...x-setting-MAC-address-when-a-port-is-rest.patch | 45 - ...x-removing-old-MAC-address-when-setting-n.patch | 34 - ...Add-packet_type-metadata-in-the-i40e-vPMD.patch | 1184 - ...0e-Enable-bad-checksum-flags-in-i40e-vPMD.patch | 114 - .../0030-net-enic-fix-possible-Rx-corruption.patch | 47 - ...1-enic-fix-bug-introduced-with-scatter-rx.patch | 24 - .../0001-kni-fix-igb-build-with-kernel-4.2.patch | 78 - ...f-rearrange-rte_mbuf-metadata-to-suit-vpp.patch | 107 - ...3-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch | 75 - ...a-bit-longer-for-autonegotiation-to-leave.patch | 26 - ...-Temporarily-turn-off-unthrottled-RTE_LOG.patch | 29 - ...nup-virtio-pmd-debug-log-output-reset-off.patch | 77 - ...008-enic-fix-dma-addr-of-outgoing-packets.patch | 28 - ...1-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch | 75 - ...a-bit-longer-for-autonegotiation-to-leave.patch | 25 - ...Cleanup-virtio-pmd-debug-log-output-reset.patch | 76 - ...f-rearrange-rte_mbuf-metadata-to-suit-vpp.patch | 83 - dpdk/dpdk-2.2.0_patches/0005-missing-include.patch | 24 - ...-in-igb_uio-driver-when-the-device-is-rem.patch | 33 - ...low-applications-to-override-rte_delay_us.patch | 43 - ...008-Add-missing-init-of-packet_type-field.patch | 70 - ...mporarily-disable-unthrottled-log-message.patch | 26 - ...010-enic-fix-dma-addr-of-outgoing-packets.patch | 28 - .../0011-enic-improve-Rx-performance.patch | 1349 - .../0012-enic-fix-last-packet-not-being-sent.patch | 39 - ...c-add-missing-newline-to-print-statements.patch | 47 - .../0014-vmxnet3-support-jumbo-frames.patch | 171 - ...fix-crash-when-allocating-too-many-queues.patch | 51 - .../0016-enic-fix-mbuf-flags-on-Rx.patch | 43 - .../0017-enic-fix-error-packets-handling.patch | 117 - ...0018-enic-remove-packet-error-conditional.patch | 58 - .../0019-enic-update-maintainers.patch | 42 - .../0020-enic-fix-Rx-descriptor-limit.patch | 66 - ...TX-hang-when-number-of-packets-queue-size.patch | 89 - ...ix-bond-link-detect-in-non-interrupt-mode.patch | 76 - ...023-enic-expose-RX-missed-packets-counter.patch | 27 - .../0024-enic-fix-imissed-rx-counter.patch | 32 - ...025-enic-fix-misalignment-of-Rx-mbuf-data.patch | 55 - ...zation-of-Tx-path-to-reduce-Host-CPU-over.patch | 1844 - 65 files changed, 50996 deletions(-) delete mode 100644 dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch delete mode 100644 dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch delete mode 100644 dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch delete mode 100644 dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch delete mode 100644 
dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch delete mode 100644 dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch delete mode 100644 dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch delete mode 100644 dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch delete mode 100644 dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch delete mode 100644 dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch delete mode 100644 dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch delete mode 100644 dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch delete mode 100644 dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch delete mode 100644 dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch delete mode 100644 dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch delete mode 100644 dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch delete mode 100644 dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch delete mode 100644 dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch delete mode 100644 dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch delete mode 100644 dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch delete mode 100644 dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch delete mode 100644 dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch delete mode 100644 dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch delete mode 100644 dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch delete mode 100644 dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch delete mode 100644 dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch delete mode 100644 dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch delete mode 100644 dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch delete mode 100644 dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch delete mode 100644 dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch delete mode 100644 dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch delete mode 100644 dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch delete mode 100644 
dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0005-missing-include.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch delete mode 100644 dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 5d072e59..2f5037df 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -28,7 +28,6 @@ DPDK_VERSION ?= 16.11 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306 DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) diff --git a/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch deleted file mode 100644 index 044a4179..00000000 --- a/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch +++ /dev/null @@ -1,75 +0,0 @@ -From c085c9f9a7332c63d002169581edc89ef99fdbb1 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:21:21 +0100 -Subject: [PATCH 1/6] e1000: Set VLAN Rx Offload tag correctly - ---- - drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++ - lib/librte_ether/rte_ether.h | 3 +++ - 2 files changed, 33 insertions(+) - -diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c -index 4a987e3..d6a4ce5 100644 ---- 
a/drivers/net/e1000/igb_rxtx.c -+++ b/drivers/net/e1000/igb_rxtx.c -@@ -904,6 +904,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ * RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - rxm->ol_flags = pkt_flags; - rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. - lo_dword.hs_rss.pkt_info); -@@ -1140,6 +1155,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ * RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - first_seg->ol_flags = pkt_flags; - first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. - lower.lo_dword.hs_rss.pkt_info); -diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h -index 1d62d8e..341121a 100644 ---- a/lib/librte_ether/rte_ether.h -+++ b/lib/librte_ether/rte_ether.h -@@ -332,6 +332,9 @@ struct vxlan_hdr { - #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ - #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ - #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ -+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */ -+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */ -+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */ - - #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) - /**< VXLAN tunnel header length. 
*/ --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch deleted file mode 100644 index 4b385467..00000000 --- a/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 8e1be5044b5ee29c8cb3921051fb6d0722b60651 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:22:11 +0100 -Subject: [PATCH 2/6] ixgbe: Wait a bit longer for autonegotiation to leave - ---- - drivers/net/ixgbe/base/ixgbe_82599.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c -index 154c1f1..817a8b5 100644 ---- a/drivers/net/ixgbe/base/ixgbe_82599.c -+++ b/drivers/net/ixgbe/base/ixgbe_82599.c -@@ -2470,7 +2470,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, - autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ -- for (i = 0; i < 10; i++) { -+ for (i = 0; i < 50; i++) { - msec_delay(4); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch deleted file mode 100644 index 8c53d0f1..00000000 --- a/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 1ee05e874eaa3f03ee7b5fbd6a32dff7304bd620 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:29:22 +0100 -Subject: [PATCH 3/6] virtio: Cleanup virtio pmd debug log output, reset - ---- - drivers/net/virtio/virtio_ethdev.c | 5 ----- - drivers/net/virtio/virtio_rxtx.c | 4 +++- - 2 files changed, 3 insertions(+), 6 deletions(-) - -diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c -index 63a368a..ed4e757 100644 ---- a/drivers/net/virtio/virtio_ethdev.c -+++ b/drivers/net/virtio/virtio_ethdev.c -@@ -1405,18 +1405,13 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet - link.link_speed = SPEED_10G; - - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { -- PMD_INIT_LOG(DEBUG, "Get link status from hw"); - vtpci_read_dev_config(hw, - offsetof(struct virtio_net_config, status), - &status, sizeof(status)); - if ((status & VIRTIO_NET_S_LINK_UP) == 0) { - link.link_status = ETH_LINK_DOWN; -- PMD_INIT_LOG(DEBUG, "Port %d is down", -- dev->data->port_id); - } else { - link.link_status = ETH_LINK_UP; -- PMD_INIT_LOG(DEBUG, "Port %d is up", -- dev->data->port_id); - } - } else { - link.link_status = ETH_LINK_UP; -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index ef21d8e..7fe14ad 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -643,6 +643,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->next = NULL; - rxm->pkt_len = (uint32_t)(len[i] - hdr_size); - rxm->data_len = (uint16_t)(len[i] - hdr_size); -+ rxm->ol_flags = 0; - - if (hw->vlan_strip) - rte_vlan_strip(rxm); -@@ -760,6 +761,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->vlan_tci = 0; - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); -+ rxm->ol_flags = 0; - - rxm->port = rxvq->port_id; - 
rx_pkts[nb_rx] = rxm; -@@ -863,7 +865,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - if (unlikely(nb_pkts < 1)) - return nb_pkts; - -- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); -+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); - - virtio_rmb(); --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch deleted file mode 100644 index 78d0c637..00000000 --- a/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch +++ /dev/null @@ -1,83 +0,0 @@ -From eed80f56477e26a5711ea3749d1881797b3c82a5 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 04:25:23 +0100 -Subject: [PATCH 4/6] mbuf: rearrange rte_mbuf metadata to suit vpp - ---- - .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++-- - lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++-------- - 2 files changed, 15 insertions(+), 10 deletions(-) - -diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -index 7e5e598..fdbeb4a 100644 ---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -@@ -118,11 +118,12 @@ struct rte_kni_mbuf { - char pad2[4]; - uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ - uint16_t data_len; /**< Amount of data in segment buffer. */ -+ char pad3[8]; -+ void *next; - - /* fields on second cache line */ -- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE))); -+ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE))); - void *pool; -- void *next; - }; - - /* -diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h -index 75a227d..ca4d0fb 100644 ---- a/lib/librte_mbuf/rte_mbuf.h -+++ b/lib/librte_mbuf/rte_mbuf.h -@@ -731,6 +731,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes - /** - * The generic rte_mbuf, containing a packet mbuf. - */ -+/* -+ * offload in the second cache line, next in the first. Better for vpp -+ * at least as of right now. -+ * If you change this structure, you must change the user-mode -+ * version in rte_mbuf.h -+ */ - struct rte_mbuf { - MARKER cacheline0; - -@@ -783,6 +789,12 @@ struct rte_mbuf { - uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ - uint16_t data_len; /**< Amount of data in segment buffer. */ - uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */ -+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ -+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ -+ struct rte_mbuf *next; /**< Next segment of scattered packet. */ -+ -+ /* second cache line - fields only used in slow path or on TX */ -+ MARKER cacheline1 __rte_cache_min_aligned; - - union { - uint32_t rss; /**< RSS hash result if RSS enabled */ -@@ -806,20 +818,12 @@ struct rte_mbuf { - uint32_t usr; /**< User defined tags. See rte_distributor_process() */ - } hash; /**< hash information */ - -- uint32_t seqn; /**< Sequence number. 
See also rte_reorder_insert() */ -- -- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ -- -- /* second cache line - fields only used in slow path or on TX */ -- MARKER cacheline1 __rte_cache_min_aligned; -- - union { - void *userdata; /**< Can be used for external metadata */ - uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ - }; - - struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ -- struct rte_mbuf *next; /**< Next segment of scattered packet. */ - - /* fields to support TX offloads */ - union { --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch deleted file mode 100644 index 8a32f600..00000000 --- a/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 3432c140c9c51e671a4d58bb428d5852426add1f Mon Sep 17 00:00:00 2001 -From: "Todd Foggoa (tfoggoa)" -Date: Wed, 3 Feb 2016 08:35:27 -0800 -Subject: [PATCH 5/6] Allow applications to override rte_delay_us() - -Some applications may wish to define their own implentation of -usec delay other than the existing blocking one. The default -behavior remains unchanged. - -Signed-off-by: Todd Foggoa (tfoggoa) ---- - lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c -index c4227cd..cc26b91 100644 ---- a/lib/librte_eal/common/eal_common_timer.c -+++ b/lib/librte_eal/common/eal_common_timer.c -@@ -47,9 +47,21 @@ - /* The frequency of the RDTSC timer resolution */ - static uint64_t eal_tsc_resolution_hz; - -+/* Allow an override of the rte_delay_us function */ -+int rte_delay_us_override (unsigned us) __attribute__((weak)); -+ -+int -+rte_delay_us_override(__attribute__((unused)) unsigned us) -+{ -+ return 0; -+} -+ - void - rte_delay_us(unsigned us) - { -+ if (rte_delay_us_override(us)) -+ return; -+ - const uint64_t start = rte_get_timer_cycles(); - const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6; - while ((rte_get_timer_cycles() - start) < ticks) --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch deleted file mode 100644 index 22415226..00000000 --- a/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 454e25ed57c17ec18ee76ead4a75f9abdf579608 Mon Sep 17 00:00:00 2001 -From: Dave Barach -Date: Tue, 9 Feb 2016 10:22:39 -0500 -Subject: [PATCH 6/6] Temporarily disable unthrottled log message. 
- -Signed-off-by: Dave Barach ---- - lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -index 06b26a9..8d918a4 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c -+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -@@ -711,6 +711,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) - if (errno == EINTR || errno == EWOULDBLOCK) - continue; - -+ /* $$$ disable to avoid filling /var/log */ -+ if (0) - RTE_LOG(ERR, EAL, "Error reading from file " - "descriptor %d: %s\n", - events[n].data.fd, --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch b/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch deleted file mode 100644 index e938c7ca..00000000 --- a/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch +++ /dev/null @@ -1,36 +0,0 @@ -From a1020e16640e3b5d8cf32ef7d2914b788a1e06f6 Mon Sep 17 00:00:00 2001 -From: Bud Grise -Date: Tue, 2 Feb 2016 12:45:44 -0800 -Subject: [PATCH 7/8] Fix a crash in igb_uio driver when the device is removed. - -This crash happens because the device still has MSI configured, -the fix is to free the IRQ. - -Signed-off-by: Todd Foggoa (tfoggoa) ---- - lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -index 72b2692..bf12391 100644 ---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -@@ -506,8 +506,15 @@ static void - igbuio_pci_remove(struct pci_dev *dev) - { - struct rte_uio_pci_dev *udev = pci_get_drvdata(dev); -+ struct uio_info *info = pci_get_drvdata(dev); - - sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); -+ -+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)){ -+ free_irq(info->irq, info->uio_dev); -+ info->irq = UIO_IRQ_NONE; -+ } -+ - uio_unregister_device(&udev->info); - igbuio_pci_release_iomem(&udev->info); - if (udev->mode == RTE_INTR_MODE_MSIX) --- -2.5.4 (Apple Git-61) - diff --git a/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch b/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch deleted file mode 100644 index f2ded92a..00000000 --- a/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch +++ /dev/null @@ -1,70 +0,0 @@ -From dfb597dfb4c8e36edb4f1db0162a12f9e0d9e695 Mon Sep 17 00:00:00 2001 -From: Bud Grise -Date: Mon, 1 Feb 2016 14:28:01 -0500 -Subject: [PATCH 8/8] Add missing init of packet_type field. - -This can cause packets to be mishandled in systems with more than -one type of driver in use. 
- -Signed-off-by: Todd Foggoa (tfoggoa) ---- - drivers/net/e1000/em_rxtx.c | 2 ++ - drivers/net/virtio/virtio_rxtx.c | 2 ++ - drivers/net/vmxnet3/vmxnet3_rxtx.c | 1 + - 3 files changed, 5 insertions(+) - -diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c -index 441ccad..3bac431 100644 ---- a/drivers/net/e1000/em_rxtx.c -+++ b/drivers/net/e1000/em_rxtx.c -@@ -793,6 +793,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxm->ol_flags = rx_desc_status_to_pkt_flags(status); - rxm->ol_flags = rxm->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -@@ -1019,6 +1020,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); - first_seg->ol_flags = first_seg->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ first_seg->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index 7fe14ad..4959d8f 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -644,6 +644,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->pkt_len = (uint32_t)(len[i] - hdr_size); - rxm->data_len = (uint16_t)(len[i] - hdr_size); - rxm->ol_flags = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - if (hw->vlan_strip) - rte_vlan_strip(rxm); -@@ -762,6 +763,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); - rxm->ol_flags = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - rxm->port = rxvq->port_id; - rx_pkts[nb_rx] = rxm; -diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c -index 4ac0456..d26d2a0 100644 ---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c -+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c -@@ -701,6 +701,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxm->ol_flags = 0; - rxm->vlan_tci = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - /* - * If this is the first buffer of the received packet, --- -2.5.4 (Apple Git-61) - diff --git a/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch b/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch deleted file mode 100644 index b7a5e570..00000000 --- a/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch +++ /dev/null @@ -1,32 +0,0 @@ -From b41648c53981a534069a8ce1b75f189ba83e24c8 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 26 Apr 2016 13:30:50 -0700 -Subject: [PATCH 09/17] enic: fix 'imissed' to count drops due to lack of RX - buffers - -Fixes: 7182d3e7d177 ("enic: expose Rx missed packets counter") -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index e3da51d..06cacd4 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -243,10 +243,10 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - r_stats->ibytes = stats->rx.rx_bytes_ok; - r_stats->obytes = stats->tx.tx_bytes_ok; 
- -- r_stats->ierrors = stats->rx.rx_errors; -+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; - r_stats->oerrors = stats->tx.tx_errors; - -- r_stats->imissed = stats->rx.rx_drop; -+ r_stats->imissed = stats->rx.rx_no_bufs; - - r_stats->imcasts = stats->rx.rx_multicast_frames_ok; - r_stats->rx_nombuf = stats->rx.rx_no_bufs; --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch b/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch deleted file mode 100644 index 2ce0e7c8..00000000 --- a/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 2b82c248638bba6e98ecf388c6e0b1f5f0b44028 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Tue, 26 Apr 2016 12:36:52 +0200 -Subject: [PATCH] Fix -O0 clang build - -Signed-off-by: Damjan Marion ---- - lib/librte_eal/common/include/arch/x86/rte_rtm.h | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h -index d935641..30c1969 100644 ---- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h -+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h -@@ -50,11 +50,14 @@ void rte_xend(void) - asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory"); - } - -+#define rte_xabort(x) asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (x) : "memory") -+#if 0 - static __attribute__((__always_inline__)) inline - void rte_xabort(const unsigned int status) - { - asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); - } -+#endif - - static __attribute__((__always_inline__)) inline - int rte_xtest(void) --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch b/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch deleted file mode 100644 index 0d4267be..00000000 --- a/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch +++ /dev/null @@ -1,55 +0,0 @@ -From d91c4e2de969086ebc8c3a1dfa30913ea3de37b4 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Mon, 25 Apr 2016 16:24:53 -0700 -Subject: [PATCH 11/17] enic: fix misalignment of Rx mbuf data - -Data DMA used m->data_off of uninitialized mbufs instead of -RTE_PKTMBUF_HEADROOM, potentially causing Rx data to be -placed at the wrong alignment in the mbuf. 
- -Fixes: 947d860c821f ("enic: improve Rx performance") -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 5 +++-- - drivers/net/enic/enic_rx.c | 6 ++++-- - 2 files changed, 7 insertions(+), 4 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 06cacd4..b164307 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -355,10 +355,11 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - return -ENOMEM; - } - -- dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off); -+ dma_addr = (dma_addr_t)(mb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); - - rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, -- mb->buf_len); -+ mb->buf_len - RTE_PKTMBUF_HEADROOM); - rq->mbuf_ring[i] = mb; - } - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -index 232987a..39bb55c 100644 ---- a/drivers/net/enic/enic_rx.c -+++ b/drivers/net/enic/enic_rx.c -@@ -314,9 +314,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - + rx_id); - - /* Push descriptor for newly allocated mbuf */ -- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off); -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); - rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len); -+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -+ - RTE_PKTMBUF_HEADROOM); - - /* Fill in the rest of the mbuf */ - rxmb->data_off = RTE_PKTMBUF_HEADROOM; --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch b/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch deleted file mode 100644 index 7acead45..00000000 --- a/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch +++ /dev/null @@ -1,1844 +0,0 @@ -From 4e1872a43b3ad824e37f840c9ed1e0c1f1b24a32 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 5 Apr 2016 15:19:06 -0700 -Subject: [PATCH 12/17] enic: Optimization of Tx path to reduce Host CPU - overhead, cleanup - -Optimizations and cleanup: -- flatten packet send path -- flatten mbuf free path -- disable CQ entry writing and use CQ messages instead -- use rte_mempool_put_bulk() to bulk return freed mbufs -- remove unnecessary fields vnic_bufs struct, use contiguous array of cache - aligned divisible elements. No next pointers. -- use local variables inside per packet loop instead of fields in structs. 
-- factor book keeping out of the per packet tx loop where possible - (removed several conditionals) -- put Tx and Rx code in 1 file (enic_rxtx.c) - -Reviewed-by: Nelson Escobar -Signed-off-by: John Daley ---- - drivers/net/enic/Makefile | 2 +- - drivers/net/enic/base/enic_vnic_wq.h | 79 ------ - drivers/net/enic/base/vnic_cq.h | 37 +-- - drivers/net/enic/base/vnic_rq.h | 2 +- - drivers/net/enic/base/vnic_wq.c | 89 +++--- - drivers/net/enic/base/vnic_wq.h | 113 +------- - drivers/net/enic/enic.h | 27 +- - drivers/net/enic/enic_ethdev.c | 67 +---- - drivers/net/enic/enic_main.c | 132 +++------ - drivers/net/enic/enic_res.h | 81 +----- - drivers/net/enic/enic_rx.c | 361 ------------------------- - drivers/net/enic/enic_rxtx.c | 505 +++++++++++++++++++++++++++++++++++ - 12 files changed, 635 insertions(+), 860 deletions(-) - delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h - delete mode 100644 drivers/net/enic/enic_rx.c - create mode 100644 drivers/net/enic/enic_rxtx.c - -diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile -index f316274..3926b79 100644 ---- a/drivers/net/enic/Makefile -+++ b/drivers/net/enic/Makefile -@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src - # - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c --SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c -+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c -diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h -deleted file mode 100644 -index b019109..0000000 ---- a/drivers/net/enic/base/enic_vnic_wq.h -+++ /dev/null -@@ -1,79 +0,0 @@ --/* -- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved. -- * Copyright 2007 Nuova Systems, Inc. All rights reserved. -- * -- * Copyright (c) 2015, Cisco Systems, Inc. -- * All rights reserved. -- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * -- * 1. Redistributions of source code must retain the above copyright -- * notice, this list of conditions and the following disclaimer. -- * -- * 2. Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in -- * the documentation and/or other materials provided with the -- * distribution. -- * -- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -- * POSSIBILITY OF SUCH DAMAGE. 
-- * -- */ -- --#ifndef _ENIC_VNIC_WQ_H_ --#define _ENIC_VNIC_WQ_H_ -- --#include "vnic_dev.h" --#include "vnic_cq.h" -- --static inline void enic_vnic_post_wq_index(struct vnic_wq *wq) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. -- */ -- wmb(); -- iowrite32(buf->index, &wq->ctrl->posted_index); --} -- --static inline void enic_vnic_post_wq(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, -- unsigned int len, int sop, -- uint8_t desc_skip_cnt, uint8_t cq_entry, -- uint8_t compressed_send, uint64_t wrid) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- buf->sop = sop; -- buf->cq_entry = cq_entry; -- buf->compressed_send = compressed_send; -- buf->desc_skip_cnt = desc_skip_cnt; -- buf->os_buf = os_buf; -- buf->dma_addr = dma_addr; -- buf->len = len; -- buf->wr_id = wrid; -- -- buf = buf->next; -- wq->ring.desc_avail -= desc_skip_cnt; -- wq->to_use = buf; -- -- if (cq_entry) -- enic_vnic_post_wq_index(wq); --} -- --#endif /* _ENIC_VNIC_WQ_H_ */ -diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h -index 922391b..ffc1aaa 100644 ---- a/drivers/net/enic/base/vnic_cq.h -+++ b/drivers/net/enic/base/vnic_cq.h -@@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq, - u8 type, u16 q_number, u16 completed_index, void *opaque), - void *opaque) - { -- struct cq_desc *cq_desc; -+ struct cq_desc *cq_desc, *cq_desc_last; - unsigned int work_done = 0; - u16 q_number, completed_index; -- u8 type, color; -- struct rte_mbuf **rx_pkts = opaque; -- unsigned int ret; -+ u8 type, color, type_color; - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); -- cq_desc_dec(cq_desc, &type, &color, -- &q_number, &completed_index); -+ -+ type_color = cq_desc->type_color; -+ color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; -+ if (color == cq->last_color) -+ return 0; - - while (color != cq->last_color) { -- if (opaque) -- opaque = (void *)&(rx_pkts[work_done]); -+ cq_desc_last = cq_desc; - -- ret = (*q_service)(cq->vdev, cq_desc, type, -- q_number, completed_index, opaque); - cq->to_clean++; - if (cq->to_clean == cq->ring.desc_count) { - cq->to_clean = 0; - cq->last_color = cq->last_color ? 
0 : 1; - } - -+ work_done++; -+ if (work_done >= work_to_do) -+ break; -+ - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); -- cq_desc_dec(cq_desc, &type, &color, -- &q_number, &completed_index); - -- if (ret) -- work_done++; -- if (work_done >= work_to_do) -- break; -+ type_color = cq_desc->type_color; -+ color = (type_color >> CQ_DESC_COLOR_SHIFT) -+ & CQ_DESC_COLOR_MASK; -+ - } - -+ cq_desc_dec(cq_desc_last, &type, &color, -+ &q_number, &completed_index); -+ -+ (*q_service)(cq->vdev, cq_desc, type, -+ q_number, completed_index, opaque); - return work_done; - } - -diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h -index e083ccc..424415c 100644 ---- a/drivers/net/enic/base/vnic_rq.h -+++ b/drivers/net/enic/base/vnic_rq.h -@@ -74,7 +74,7 @@ struct vnic_rq { - struct vnic_dev_ring ring; - struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ - unsigned int mbuf_next_idx; /* next mb to consume */ -- void *os_buf_head; -+ void *mb_head; - unsigned int pkts_outstanding; - uint16_t rx_nb_hold; - uint16_t rx_free_thresh; -diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c -index a3ef417..ccbbd61 100644 ---- a/drivers/net/enic/base/vnic_wq.c -+++ b/drivers/net/enic/base/vnic_wq.c -@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, - - static int vnic_wq_alloc_bufs(struct vnic_wq *wq) - { -- struct vnic_wq_buf *buf; -- unsigned int i, j, count = wq->ring.desc_count; -- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); -- -- for (i = 0; i < blks; i++) { -- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); -- if (!wq->bufs[i]) -- return -ENOMEM; -- } -- -- for (i = 0; i < blks; i++) { -- buf = wq->bufs[i]; -- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { -- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; -- buf->desc = (u8 *)wq->ring.descs + -- wq->ring.desc_size * buf->index; -- if (buf->index + 1 == count) { -- buf->next = wq->bufs[0]; -- break; -- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { -- buf->next = wq->bufs[i + 1]; -- } else { -- buf->next = buf + 1; -- buf++; -- } -- } -- } -- -- wq->to_use = wq->to_clean = wq->bufs[0]; -- -+ unsigned int count = wq->ring.desc_count; -+ /* Allocate the mbuf ring */ -+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs", -+ sizeof(struct vnic_wq_buf) * count, -+ RTE_CACHE_LINE_SIZE, wq->socket_id); -+ wq->head_idx = 0; -+ wq->tail_idx = 0; -+ if (wq->bufs == NULL) -+ return -ENOMEM; - return 0; - } - - void vnic_wq_free(struct vnic_wq *wq) - { - struct vnic_dev *vdev; -- unsigned int i; - - vdev = wq->vdev; - - vnic_dev_free_desc_ring(vdev, &wq->ring); - -- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { -- if (wq->bufs[i]) { -- kfree(wq->bufs[i]); -- wq->bufs[i] = NULL; -- } -- } -- -+ rte_free(wq->bufs); - wq->ctrl = NULL; - } - --int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, -- unsigned int desc_size) --{ -- int mem_size = 0; -- -- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size); -- -- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) * -- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count); -- -- return mem_size; --} -- - - int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, - unsigned int desc_count, unsigned int desc_size) -@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, - iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); - iowrite32(0, 
&wq->ctrl->error_status); - -- wq->to_use = wq->to_clean = -- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] -- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; -+ wq->head_idx = fetch_index; -+ wq->tail_idx = wq->head_idx; - } - - void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, -@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, - vnic_wq_init_start(wq, cq_index, 0, 0, - error_interrupt_enable, - error_interrupt_offset); -+ wq->last_completed_index = 0; - } - - void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error) -@@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq) - return -ETIMEDOUT; - } - -+static inline uint32_t -+buf_idx_incr(uint32_t n_descriptors, uint32_t idx) -+{ -+ idx++; -+ if (unlikely(idx == n_descriptors)) -+ idx = 0; -+ return idx; -+} -+ - void vnic_wq_clean(struct vnic_wq *wq, -- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) -+ void (*buf_clean)(struct vnic_wq_buf *buf)) - { - struct vnic_wq_buf *buf; -+ unsigned int to_clean = wq->tail_idx; - -- buf = wq->to_clean; -+ buf = &wq->bufs[to_clean]; - - while (vnic_wq_desc_used(wq) > 0) { - -- (*buf_clean)(wq, buf); -+ (*buf_clean)(buf); -+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean); - -- buf = wq->to_clean = buf->next; -+ buf = &wq->bufs[to_clean]; - wq->ring.desc_avail++; - } - -- wq->to_use = wq->to_clean = wq->bufs[0]; -+ wq->head_idx = 0; -+ wq->tail_idx = 0; - - iowrite32(0, &wq->ctrl->fetch_index); - iowrite32(0, &wq->ctrl->posted_index); -diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h -index c23de62..37c3ff9 100644 ---- a/drivers/net/enic/base/vnic_wq.h -+++ b/drivers/net/enic/base/vnic_wq.h -@@ -64,42 +64,23 @@ struct vnic_wq_ctrl { - u32 pad9; - }; - -+/* 16 bytes */ - struct vnic_wq_buf { -- struct vnic_wq_buf *next; -- dma_addr_t dma_addr; -- void *os_buf; -- unsigned int len; -- unsigned int index; -- int sop; -- void *desc; -- uint64_t wr_id; /* Cookie */ -- uint8_t cq_entry; /* Gets completion event from hw */ -- uint8_t desc_skip_cnt; /* Num descs to occupy */ -- uint8_t compressed_send; /* Both hdr and payload in one desc */ -+ struct rte_mempool *pool; -+ void *mb; - }; - --/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ --#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 --#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 --#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ -- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? 
\ -- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) --#define VNIC_WQ_BUF_BLK_SZ(entries) \ -- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) --#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ -- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) --#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) -- - struct vnic_wq { - unsigned int index; - struct vnic_dev *vdev; - struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ - struct vnic_dev_ring ring; -- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; -- struct vnic_wq_buf *to_use; -- struct vnic_wq_buf *to_clean; -- unsigned int pkts_outstanding; -+ struct vnic_wq_buf *bufs; -+ unsigned int head_idx; -+ unsigned int tail_idx; - unsigned int socket_id; -+ const struct rte_memzone *cqmsg_rz; -+ uint16_t last_completed_index; - }; - - static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) -@@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) - return wq->ring.desc_count - wq->ring.desc_avail - 1; - } - --static inline void *vnic_wq_next_desc(struct vnic_wq *wq) --{ -- return wq->to_use->desc; --} -- - #define PI_LOG2_CACHE_LINE_SIZE 5 - #define PI_INDEX_BITS 12 - #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) -@@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len, - PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); - } - --static inline void vnic_wq_post(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, -- unsigned int len, int sop, int eop, -- uint8_t desc_skip_cnt, uint8_t cq_entry, -- uint8_t compressed_send, uint64_t wrid) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- buf->sop = sop; -- buf->cq_entry = cq_entry; -- buf->compressed_send = compressed_send; -- buf->desc_skip_cnt = desc_skip_cnt; -- buf->os_buf = os_buf; -- buf->dma_addr = dma_addr; -- buf->len = len; -- buf->wr_id = wrid; -- -- buf = buf->next; -- if (eop) { --#ifdef DO_PREFETCH -- uint64_t wr = vnic_cached_posted_index(dma_addr, len, -- buf->index); --#endif -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. -- */ -- wmb(); --#ifdef DO_PREFETCH -- /* Intel chipsets seem to limit the rate of PIOs that we can -- * push on the bus. Thus, it is very important to do a single -- * 64 bit write here. With two 32-bit writes, my maximum -- * pkt/sec rate was cut almost in half. 
-AJF -- */ -- iowrite64((uint64_t)wr, &wq->ctrl->posted_index); --#else -- iowrite32(buf->index, &wq->ctrl->posted_index); --#endif -- } -- wq->to_use = buf; -- -- wq->ring.desc_avail -= desc_skip_cnt; --} -- --static inline void vnic_wq_service(struct vnic_wq *wq, -- struct cq_desc *cq_desc, u16 completed_index, -- void (*buf_service)(struct vnic_wq *wq, -- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), -- void *opaque) --{ -- struct vnic_wq_buf *buf; -- -- buf = wq->to_clean; -- while (1) { -- -- (*buf_service)(wq, cq_desc, buf, opaque); -- -- wq->ring.desc_avail++; -- -- wq->to_clean = buf->next; -- -- if (buf->index == completed_index) -- break; -- -- buf = wq->to_clean; -- } --} -- - void vnic_wq_free(struct vnic_wq *wq); - int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, - unsigned int desc_count, unsigned int desc_size); -@@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq); - void vnic_wq_enable(struct vnic_wq *wq); - int vnic_wq_disable(struct vnic_wq *wq); - void vnic_wq_clean(struct vnic_wq *wq, -- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); --int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, -- unsigned int desc_size); -+ void (*buf_clean)(struct vnic_wq_buf *buf)); - - #endif /* _VNIC_WQ_H_ */ -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 8c914f5..43b82a6 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) - return (struct enic *)eth_dev->data->dev_private; - } - -+static inline uint32_t -+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -+{ -+ uint32_t d = i0 + i1; -+ d -= (d >= n_descriptors) ? n_descriptors : 0; -+ return d; -+} -+ -+static inline uint32_t -+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -+{ -+ int32_t d = i1 - i0; -+ return (uint32_t)((d < 0) ? 
((int32_t)n_descriptors + d) : d); -+} -+ -+static inline uint32_t -+enic_ring_incr(uint32_t n_descriptors, uint32_t idx) -+{ -+ idx++; -+ if (unlikely(idx == n_descriptors)) -+ idx = 0; -+ return idx; -+} -+ - #define RTE_LIBRTE_ENIC_ASSERT_ENABLE - #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE - #define ASSERT(x) do { \ -@@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic); - extern void enic_clsf_destroy(struct enic *enic); - uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -- -+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts); - #endif /* _ENIC_H_ */ -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 6bea940..697ff82 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui - enic_del_mac_address(enic); - } - -- --static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -- uint16_t nb_pkts) --{ -- uint16_t index; -- unsigned int frags; -- unsigned int pkt_len; -- unsigned int seg_len; -- unsigned int inc_len; -- unsigned int nb_segs; -- struct rte_mbuf *tx_pkt, *next_tx_pkt; -- struct vnic_wq *wq = (struct vnic_wq *)tx_queue; -- struct enic *enic = vnic_dev_priv(wq->vdev); -- unsigned short vlan_id; -- unsigned short ol_flags; -- uint8_t last_seg, eop; -- unsigned int host_tx_descs = 0; -- -- for (index = 0; index < nb_pkts; index++) { -- tx_pkt = *tx_pkts++; -- inc_len = 0; -- nb_segs = tx_pkt->nb_segs; -- if (nb_segs > vnic_wq_desc_avail(wq)) { -- if (index > 0) -- enic_post_wq_index(wq); -- -- /* wq cleanup and try again */ -- if (!enic_cleanup_wq(enic, wq) || -- (nb_segs > vnic_wq_desc_avail(wq))) { -- return index; -- } -- } -- -- pkt_len = tx_pkt->pkt_len; -- vlan_id = tx_pkt->vlan_tci; -- ol_flags = tx_pkt->ol_flags; -- for (frags = 0; inc_len < pkt_len; frags++) { -- if (!tx_pkt) -- break; -- next_tx_pkt = tx_pkt->next; -- seg_len = tx_pkt->data_len; -- inc_len += seg_len; -- -- host_tx_descs++; -- last_seg = 0; -- eop = 0; -- if ((pkt_len == inc_len) || !next_tx_pkt) { -- eop = 1; -- /* post if last packet in batch or > thresh */ -- if ((index == (nb_pkts - 1)) || -- (host_tx_descs > ENIC_TX_POST_THRESH)) { -- last_seg = 1; -- host_tx_descs = 0; -- } -- } -- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len, -- !frags, eop, last_seg, ol_flags, vlan_id); -- tx_pkt = next_tx_pkt; -- } -- } -- -- enic_cleanup_wq(enic, wq); -- return index; --} -- - static const struct eth_dev_ops enicpmd_eth_dev_ops = { - .dev_configure = enicpmd_dev_configure, - .dev_start = enicpmd_dev_start, -@@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) - enic->rte_dev = eth_dev; - eth_dev->dev_ops = &enicpmd_eth_dev_ops; - eth_dev->rx_pkt_burst = &enic_recv_pkts; -- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts; -+ eth_dev->tx_pkt_burst = &enic_xmit_pkts; - - pdev = eth_dev->pci_dev; - rte_eth_copy_pci_info(eth_dev, pdev); -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index b164307..9bfdec1 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -40,11 +40,11 @@ - #include - - #include --#include - #include - #include - #include - #include -+#include - - #include "enic_compat.h" - #include "enic.h" -@@ -58,7 +58,6 @@ - #include "vnic_cq.h" - #include "vnic_intr.h" - #include "vnic_nic.h" --#include "enic_vnic_wq.h" - - static inline struct rte_mbuf * - 
rte_rxmbuf_alloc(struct rte_mempool *mp) -@@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) - } - } - -- - void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) - { - vnic_set_hdr_split_size(enic->vdev, split_hdr_size); - } - --static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf) -+static void enic_free_wq_buf(struct vnic_wq_buf *buf) - { -- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf; -+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb; - - rte_mempool_put(mbuf->pool, mbuf); -- buf->os_buf = NULL; --} -- --static void enic_wq_free_buf(struct vnic_wq *wq, -- __rte_unused struct cq_desc *cq_desc, -- struct vnic_wq_buf *buf, -- __rte_unused void *opaque) --{ -- enic_free_wq_buf(wq, buf); --} -- --static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, -- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque) --{ -- struct enic *enic = vnic_dev_priv(vdev); -- -- vnic_wq_service(&enic->wq[q_number], cq_desc, -- completed_index, enic_wq_free_buf, -- opaque); -- -- return 0; -+ buf->mb = NULL; - } - - static void enic_log_q_error(struct enic *enic) -@@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic) - } - } - --unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq) --{ -- unsigned int cq = enic_cq_wq(enic, wq->index); -- -- /* Return the work done */ -- return vnic_cq_service(&enic->cq[cq], -- -1 /*wq_work_to_do*/, enic_wq_service, NULL); --} -- --void enic_post_wq_index(struct vnic_wq *wq) --{ -- enic_vnic_post_wq_index(wq); --} -- --void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, -- struct rte_mbuf *tx_pkt, unsigned short len, -- uint8_t sop, uint8_t eop, uint8_t cq_entry, -- uint16_t ol_flags, uint16_t vlan_tag) --{ -- struct wq_enet_desc *desc = vnic_wq_next_desc(wq); -- uint16_t mss = 0; -- uint8_t vlan_tag_insert = 0; -- uint64_t bus_addr = (dma_addr_t) -- (tx_pkt->buf_physaddr + tx_pkt->data_off); -- -- if (sop) { -- if (ol_flags & PKT_TX_VLAN_PKT) -- vlan_tag_insert = 1; -- -- if (enic->hw_ip_checksum) { -- if (ol_flags & PKT_TX_IP_CKSUM) -- mss |= ENIC_CALC_IP_CKSUM; -- -- if (ol_flags & PKT_TX_TCP_UDP_CKSUM) -- mss |= ENIC_CALC_TCP_UDP_CKSUM; -- } -- } -- -- wq_enet_desc_enc(desc, -- bus_addr, -- len, -- mss, -- 0 /* header_length */, -- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */, -- eop, -- cq_entry, -- 0 /* fcoe_encap */, -- vlan_tag_insert, -- vlan_tag, -- 0 /* loopback */); -- -- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len, -- sop, -- 1 /*desc_skip_cnt*/, -- cq_entry, -- 0 /*compressed send*/, -- 0 /*wrid*/); --} - - void enic_dev_stats_clear(struct enic *enic) - { -@@ -298,12 +218,28 @@ void enic_init_vnic_resources(struct enic *enic) - unsigned int error_interrupt_enable = 1; - unsigned int error_interrupt_offset = 0; - unsigned int index = 0; -+ unsigned int cq_idx; -+ -+ vnic_dev_stats_clear(enic->vdev); - - for (index = 0; index < enic->rq_count; index++) { - vnic_rq_init(&enic->rq[index], - enic_cq_rq(enic, index), - error_interrupt_enable, - error_interrupt_offset); -+ -+ cq_idx = enic_cq_rq(enic, index); -+ vnic_cq_init(&enic->cq[cq_idx], -+ 0 /* flow_control_enable */, -+ 1 /* color_enable */, -+ 0 /* cq_head */, -+ 0 /* cq_tail */, -+ 1 /* cq_tail_color */, -+ 0 /* interrupt_enable */, -+ 1 /* cq_entry_enable */, -+ 0 /* cq_message_enable */, -+ 0 /* interrupt offset */, -+ 0 /* cq_message_addr */); - } - - for (index = 0; index < enic->wq_count; index++) { -@@ -311,22 +247,19 @@ void 
enic_init_vnic_resources(struct enic *enic) - enic_cq_wq(enic, index), - error_interrupt_enable, - error_interrupt_offset); -- } -- -- vnic_dev_stats_clear(enic->vdev); - -- for (index = 0; index < enic->cq_count; index++) { -- vnic_cq_init(&enic->cq[index], -+ cq_idx = enic_cq_wq(enic, index); -+ vnic_cq_init(&enic->cq[cq_idx], - 0 /* flow_control_enable */, - 1 /* color_enable */, - 0 /* cq_head */, - 0 /* cq_tail */, - 1 /* cq_tail_color */, - 0 /* interrupt_enable */, -- 1 /* cq_entry_enable */, -- 0 /* cq_message_enable */, -+ 0 /* cq_entry_enable */, -+ 1 /* cq_message_enable */, - 0 /* interrupt offset */, -- 0 /* cq_message_addr */); -+ (u64)enic->wq[index].cqmsg_rz->phys_addr); - } - - vnic_intr_init(&enic->intr, -@@ -570,6 +503,7 @@ void enic_free_wq(void *txq) - struct vnic_wq *wq = (struct vnic_wq *)txq; - struct enic *enic = vnic_dev_priv(wq->vdev); - -+ rte_memzone_free(wq->cqmsg_rz); - vnic_wq_free(wq); - vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); - } -@@ -580,6 +514,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - int err; - struct vnic_wq *wq = &enic->wq[queue_idx]; - unsigned int cq_index = enic_cq_wq(enic, queue_idx); -+ char name[NAME_MAX]; -+ static int instance; - - wq->socket_id = socket_id; - if (nb_desc) { -@@ -615,6 +551,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - dev_err(enic, "error in allocation of cq for wq\n"); - } - -+ /* setup up CQ message */ -+ snprintf((char *)name, sizeof(name), -+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx, -+ instance++); -+ -+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, -+ sizeof(uint32_t), -+ SOCKET_ID_ANY, 0, -+ ENIC_ALIGN); -+ if (!wq->cqmsg_rz) -+ return -ENOMEM; -+ - return err; - } - -diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h -index 00fa71d..3e1bdf5 100644 ---- a/drivers/net/enic/enic_res.h -+++ b/drivers/net/enic/enic_res.h -@@ -53,89 +53,10 @@ - - #define ENIC_NON_TSO_MAX_DESC 16 - #define ENIC_DEFAULT_RX_FREE_THRESH 32 --#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2) -+#define ENIC_TX_XMIT_MAX 64 - - #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0) - --static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int mss_or_csum_offset, unsigned int hdr_len, -- int vlan_tag_insert, unsigned int vlan_tag, -- int offload_mode, int cq_entry, int sop, int eop, int loopback) --{ -- struct wq_enet_desc *desc = vnic_wq_next_desc(wq); -- u8 desc_skip_cnt = 1; -- u8 compressed_send = 0; -- u64 wrid = 0; -- -- wq_enet_desc_enc(desc, -- (u64)dma_addr | VNIC_PADDR_TARGET, -- (u16)len, -- (u16)mss_or_csum_offset, -- (u16)hdr_len, (u8)offload_mode, -- (u8)eop, (u8)cq_entry, -- 0, /* fcoe_encap */ -- (u8)vlan_tag_insert, -- (u16)vlan_tag, -- (u8)loopback); -- -- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, -- (u8)cq_entry, compressed_send, wrid); --} -- --static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- 0, 0, 0, 0, 0, -- eop, 0 /* !SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, -- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- 0, 0, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- int ip_csum, int tcpudp_csum, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), -- 0, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int csum_offset, unsigned int hdr_len, -- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- csum_offset, hdr_len, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM_L4, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- mss, hdr_len, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_TSO, -- eop, 1 /* SOP */, eop, loopback); --} -- - struct enic; - - int enic_get_vnic_config(struct enic *); -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -deleted file mode 100644 -index 39bb55c..0000000 ---- a/drivers/net/enic/enic_rx.c -+++ /dev/null -@@ -1,361 +0,0 @@ --/* -- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. -- * Copyright 2007 Nuova Systems, Inc. All rights reserved. -- * -- * Copyright (c) 2014, Cisco Systems, Inc. -- * All rights reserved. -- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * -- * 1. Redistributions of source code must retain the above copyright -- * notice, this list of conditions and the following disclaimer. -- * -- * 2. 
Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in -- * the documentation and/or other materials provided with the -- * distribution. -- * -- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -- * POSSIBILITY OF SUCH DAMAGE. -- * -- */ -- --#include --#include --#include -- --#include "enic_compat.h" --#include "rq_enet_desc.h" --#include "enic.h" -- --#define RTE_PMD_USE_PREFETCH -- --#ifdef RTE_PMD_USE_PREFETCH --/* -- * Prefetch a cache line into all cache levels. -- */ --#define rte_enic_prefetch(p) rte_prefetch0(p) --#else --#define rte_enic_prefetch(p) do {} while (0) --#endif -- --#ifdef RTE_PMD_PACKET_PREFETCH --#define rte_packet_prefetch(p) rte_prefetch1(p) --#else --#define rte_packet_prefetch(p) do {} while (0) --#endif -- --static inline struct rte_mbuf * --rte_rxmbuf_alloc(struct rte_mempool *mp) --{ -- struct rte_mbuf *m; -- -- m = __rte_mbuf_raw_alloc(mp); -- __rte_mbuf_sanity_check_raw(m, 0); -- return m; --} -- --static inline uint16_t --enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) --{ -- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; --} -- --static inline uint16_t --enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) --{ -- return(le16_to_cpu(crd->bytes_written_flags) & -- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); --} -- --static inline uint8_t --enic_cq_rx_desc_packet_error(uint16_t bwflags) --{ -- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == -- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); --} -- --static inline uint8_t --enic_cq_rx_desc_eop(uint16_t ciflags) --{ -- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) -- == CQ_ENET_RQ_DESC_FLAGS_EOP; --} -- --static inline uint8_t --enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) --{ -- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & -- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == -- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); --} -- --static inline uint8_t --enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) --{ -- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == -- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); --} -- --static inline uint8_t --enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) --{ -- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == -- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); --} -- --static inline uint8_t --enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) --{ -- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> -- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); --} -- --static inline uint32_t --enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) --{ -- return le32_to_cpu(cqrd->rss_hash); --} -- --static inline uint16_t --enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) --{ -- return le16_to_cpu(cqrd->vlan); 
--} -- --static inline uint16_t --enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- return le16_to_cpu(cqrd->bytes_written_flags) & -- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; --} -- --static inline uint8_t --enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint16_t bwflags; -- int ret = 0; -- uint64_t pkt_err_flags = 0; -- -- bwflags = enic_cq_rx_desc_bwflags(cqrd); -- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { -- pkt_err_flags = PKT_RX_MAC_ERR; -- ret = 1; -- } -- *pkt_err_flags_out = pkt_err_flags; -- return ret; --} -- --/* -- * Lookup table to translate RX CQ flags to mbuf flags. -- */ --static inline uint32_t --enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint8_t cqrd_flags = cqrd->flags; -- static const uint32_t cq_type_table[128] __rte_cache_aligned = { -- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, -- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_UDP, -- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_TCP, -- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_FRAG, -- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, -- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_UDP, -- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_TCP, -- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_FRAG, -- /* All others reserved */ -- }; -- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT -- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 -- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; -- return cq_type_table[cqrd_flags]; --} -- --static inline void --enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint16_t ciflags, bwflags, pkt_flags = 0; -- ciflags = enic_cq_rx_desc_ciflags(cqrd); -- bwflags = enic_cq_rx_desc_bwflags(cqrd); -- -- mbuf->ol_flags = 0; -- -- /* flags are meaningless if !EOP */ -- if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -- goto mbuf_flags_done; -- -- /* VLAN stripping */ -- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -- pkt_flags |= PKT_RX_VLAN_PKT; -- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); -- } else { -- mbuf->vlan_tci = 0; -- } -- -- /* RSS flag */ -- if (enic_cq_rx_desc_rss_type(cqrd)) { -- pkt_flags |= PKT_RX_RSS_HASH; -- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); -- } -- -- /* checksum flags */ -- if (!enic_cq_rx_desc_csum_not_calc(cqrd) && -- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) -- pkt_flags |= PKT_RX_IP_CKSUM_BAD; -- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) -- pkt_flags |= PKT_RX_L4_CKSUM_BAD; -- } -- } -- -- mbuf_flags_done: -- mbuf->ol_flags = pkt_flags; --} -- --static inline uint32_t --enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) --{ -- uint32_t d = i0 + i1; -- ASSERT(i0 < n_descriptors); -- ASSERT(i1 < n_descriptors); -- d -= (d >= n_descriptors) ? 
n_descriptors : 0; -- return d; --} -- -- --uint16_t --enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -- uint16_t nb_pkts) --{ -- struct vnic_rq *rq = rx_queue; -- struct enic *enic = vnic_dev_priv(rq->vdev); -- unsigned int rx_id; -- struct rte_mbuf *nmb, *rxmb; -- uint16_t nb_rx = 0; -- uint16_t nb_hold; -- struct vnic_cq *cq; -- volatile struct cq_desc *cqd_ptr; -- uint8_t color; -- -- cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -- -- nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -- -- while (nb_rx < nb_pkts) { -- volatile struct rq_enet_desc *rqd_ptr; -- dma_addr_t dma_addr; -- struct cq_desc cqd; -- uint64_t ol_err_flags; -- uint8_t packet_error; -- -- /* Check for pkts available */ -- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -- & CQ_DESC_COLOR_MASK; -- if (color == cq->last_color) -- break; -- -- /* Get the cq descriptor and rq pointer */ -- cqd = *cqd_ptr; -- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -- -- /* allocate a new mbuf */ -- nmb = rte_rxmbuf_alloc(rq->mp); -- if (nmb == NULL) { -- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -- enic->port_id, (unsigned)rq->index); -- rte_eth_devices[enic->port_id]. -- data->rx_mbuf_alloc_failed++; -- break; -- } -- -- /* A packet error means descriptor and data are untrusted */ -- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -- -- /* Get the mbuf to return and replace with one just allocated */ -- rxmb = rq->mbuf_ring[rx_id]; -- rq->mbuf_ring[rx_id] = nmb; -- -- /* Increment cqd, rqd, mbuf_table index */ -- rx_id++; -- if (unlikely(rx_id == rq->ring.desc_count)) { -- rx_id = 0; -- cq->last_color = cq->last_color ? 
0 : 1; -- } -- -- /* Prefetch next mbuf & desc while processing current one */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -- rte_enic_prefetch(cqd_ptr); -- rte_enic_prefetch(rq->mbuf_ring[rx_id]); -- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -- + rx_id); -- -- /* Push descriptor for newly allocated mbuf */ -- dma_addr = (dma_addr_t)(nmb->buf_physaddr -- + RTE_PKTMBUF_HEADROOM); -- rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -- - RTE_PKTMBUF_HEADROOM); -- -- /* Fill in the rest of the mbuf */ -- rxmb->data_off = RTE_PKTMBUF_HEADROOM; -- rxmb->nb_segs = 1; -- rxmb->next = NULL; -- rxmb->port = enic->port_id; -- if (!packet_error) { -- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -- enic_cq_rx_to_pkt_flags(&cqd, rxmb); -- } else { -- rxmb->pkt_len = 0; -- rxmb->packet_type = 0; -- rxmb->ol_flags = 0; -- } -- rxmb->data_len = rxmb->pkt_len; -- -- /* prefetch mbuf data for caller */ -- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -- RTE_PKTMBUF_HEADROOM)); -- -- /* store the mbuf address into the next entry of the array */ -- rx_pkts[nb_rx++] = rxmb; -- } -- -- nb_hold += nb_rx; -- cq->to_clean = rx_id; -- -- if (nb_hold > rq->rx_free_thresh) { -- rq->posted_index = enic_ring_add(rq->ring.desc_count, -- rq->posted_index, nb_hold); -- nb_hold = 0; -- rte_mb(); -- iowrite32(rq->posted_index, &rq->ctrl->posted_index); -- } -- -- rq->rx_nb_hold = nb_hold; -- -- return nb_rx; --} -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -new file mode 100644 -index 0000000..71ca34e ---- /dev/null -+++ b/drivers/net/enic/enic_rxtx.c -@@ -0,0 +1,505 @@ -+/* -+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved. -+ * Copyright 2007 Nuova Systems, Inc. All rights reserved. -+ * -+ * Copyright (c) 2016, Cisco Systems, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * 1. Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * -+ * 2. Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "enic_compat.h" -+#include "rq_enet_desc.h" -+#include "enic.h" -+ -+#define RTE_PMD_USE_PREFETCH -+ -+#ifdef RTE_PMD_USE_PREFETCH -+/* -+ * Prefetch a cache line into all cache levels. 
-+ */ -+#define rte_enic_prefetch(p) rte_prefetch0(p) -+#else -+#define rte_enic_prefetch(p) do {} while (0) -+#endif -+ -+#ifdef RTE_PMD_PACKET_PREFETCH -+#define rte_packet_prefetch(p) rte_prefetch1(p) -+#else -+#define rte_packet_prefetch(p) do {} while (0) -+#endif -+ -+static inline struct rte_mbuf * -+rte_rxmbuf_alloc(struct rte_mempool *mp) -+{ -+ struct rte_mbuf *m; -+ -+ m = __rte_mbuf_raw_alloc(mp); -+ __rte_mbuf_sanity_check_raw(m, 0); -+ return m; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) -+{ -+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) -+{ -+ return(le16_to_cpu(crd->bytes_written_flags) & -+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_packet_error(uint16_t bwflags) -+{ -+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == -+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_eop(uint16_t ciflags) -+{ -+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) -+ == CQ_ENET_RQ_DESC_FLAGS_EOP; -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) -+{ -+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> -+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); -+} -+ -+static inline uint32_t -+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) -+{ -+ return le32_to_cpu(cqrd->rss_hash); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) -+{ -+ return le16_to_cpu(cqrd->vlan); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ return le16_to_cpu(cqrd->bytes_written_flags) & -+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; -+} -+ -+static inline uint8_t -+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t bwflags; -+ int ret = 0; -+ uint64_t pkt_err_flags = 0; -+ -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { -+ pkt_err_flags = PKT_RX_MAC_ERR; -+ ret = 1; -+ } -+ *pkt_err_flags_out = pkt_err_flags; -+ return ret; -+} -+ -+/* -+ * Lookup table to translate RX CQ flags to mbuf flags. 
-+ */ -+static inline uint32_t -+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint8_t cqrd_flags = cqrd->flags; -+ static const uint32_t cq_type_table[128] __rte_cache_aligned = { -+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_UDP, -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_TCP, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_FRAG, -+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, -+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_UDP, -+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_TCP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_FRAG, -+ /* All others reserved */ -+ }; -+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT -+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 -+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; -+ return cq_type_table[cqrd_flags]; -+} -+ -+static inline void -+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t ciflags, bwflags, pkt_flags = 0; -+ ciflags = enic_cq_rx_desc_ciflags(cqrd); -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ -+ mbuf->ol_flags = 0; -+ -+ /* flags are meaningless if !EOP */ -+ if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -+ goto mbuf_flags_done; -+ -+ /* VLAN stripping */ -+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); -+ } else { -+ mbuf->vlan_tci = 0; -+ } -+ -+ /* RSS flag */ -+ if (enic_cq_rx_desc_rss_type(cqrd)) { -+ pkt_flags |= PKT_RX_RSS_HASH; -+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); -+ } -+ -+ /* checksum flags */ -+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) && -+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_IP_CKSUM_BAD; -+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_L4_CKSUM_BAD; -+ } -+ } -+ -+ mbuf_flags_done: -+ mbuf->ol_flags = pkt_flags; -+} -+ -+uint16_t -+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts) -+{ -+ struct vnic_rq *rq = rx_queue; -+ struct enic *enic = vnic_dev_priv(rq->vdev); -+ unsigned int rx_id; -+ struct rte_mbuf *nmb, *rxmb; -+ uint16_t nb_rx = 0; -+ uint16_t nb_hold; -+ struct vnic_cq *cq; -+ volatile struct cq_desc *cqd_ptr; -+ uint8_t color; -+ -+ cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ -+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -+ -+ while (nb_rx < nb_pkts) { -+ volatile struct rq_enet_desc *rqd_ptr; -+ dma_addr_t dma_addr; -+ struct cq_desc cqd; -+ uint64_t ol_err_flags; -+ uint8_t packet_error; -+ -+ /* Check for pkts available */ -+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -+ & CQ_DESC_COLOR_MASK; -+ if (color == cq->last_color) -+ break; -+ -+ /* Get the cq descriptor and rq pointer */ -+ cqd = *cqd_ptr; -+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -+ -+ /* allocate a new mbuf */ -+ nmb = rte_rxmbuf_alloc(rq->mp); -+ if (nmb == NULL) { -+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -+ enic->port_id, (unsigned)rq->index); -+ rte_eth_devices[enic->port_id]. 
-+ data->rx_mbuf_alloc_failed++; -+ break; -+ } -+ -+ /* A packet error means descriptor and data are untrusted */ -+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -+ -+ /* Get the mbuf to return and replace with one just allocated */ -+ rxmb = rq->mbuf_ring[rx_id]; -+ rq->mbuf_ring[rx_id] = nmb; -+ -+ /* Increment cqd, rqd, mbuf_table index */ -+ rx_id++; -+ if (unlikely(rx_id == rq->ring.desc_count)) { -+ rx_id = 0; -+ cq->last_color = cq->last_color ? 0 : 1; -+ } -+ -+ /* Prefetch next mbuf & desc while processing current one */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ rte_enic_prefetch(cqd_ptr); -+ rte_enic_prefetch(rq->mbuf_ring[rx_id]); -+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -+ + rx_id); -+ -+ /* Push descriptor for newly allocated mbuf */ -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); -+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -+ - RTE_PKTMBUF_HEADROOM); -+ -+ /* Fill in the rest of the mbuf */ -+ rxmb->data_off = RTE_PKTMBUF_HEADROOM; -+ rxmb->nb_segs = 1; -+ rxmb->next = NULL; -+ rxmb->port = enic->port_id; -+ if (!packet_error) { -+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -+ enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ } else { -+ rxmb->pkt_len = 0; -+ rxmb->packet_type = 0; -+ rxmb->ol_flags = 0; -+ } -+ rxmb->data_len = rxmb->pkt_len; -+ -+ /* prefetch mbuf data for caller */ -+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -+ RTE_PKTMBUF_HEADROOM)); -+ -+ /* store the mbuf address into the next entry of the array */ -+ rx_pkts[nb_rx++] = rxmb; -+ } -+ -+ nb_hold += nb_rx; -+ cq->to_clean = rx_id; -+ -+ if (nb_hold > rq->rx_free_thresh) { -+ rq->posted_index = enic_ring_add(rq->ring.desc_count, -+ rq->posted_index, nb_hold); -+ nb_hold = 0; -+ rte_mb(); -+ iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ } -+ -+ rq->rx_nb_hold = nb_hold; -+ -+ return nb_rx; -+} -+ -+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) -+{ -+ struct vnic_wq_buf *buf; -+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS]; -+ unsigned int nb_to_free, nb_free = 0, i; -+ struct rte_mempool *pool; -+ unsigned int tail_idx; -+ unsigned int desc_count = wq->ring.desc_count; -+ -+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) -+ + 1; -+ tail_idx = wq->tail_idx; -+ buf = &wq->bufs[tail_idx]; -+ pool = ((struct rte_mbuf *)buf->mb)->pool; -+ for (i = 0; i < nb_to_free; i++) { -+ buf = &wq->bufs[tail_idx]; -+ m = (struct rte_mbuf *)(buf->mb); -+ if (likely(m->pool == pool)) { -+ ASSERT(nb_free < ENIC_MAX_WQ_DESCS); -+ free[nb_free++] = m; -+ } else { -+ rte_mempool_put_bulk(pool, (void *)free, nb_free); -+ free[0] = m; -+ nb_free = 1; -+ pool = m->pool; -+ } -+ tail_idx = enic_ring_incr(desc_count, tail_idx); -+ buf->mb = NULL; -+ } -+ -+ rte_mempool_put_bulk(pool, (void **)free, nb_free); -+ -+ wq->tail_idx = tail_idx; -+ wq->ring.desc_avail += nb_to_free; -+} -+ -+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) -+{ -+ u16 completed_index; -+ -+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff; -+ -+ if (wq->last_completed_index != completed_index) { -+ enic_free_wq_bufs(wq, completed_index); -+ wq->last_completed_index = completed_index; -+ } -+ return 0; -+} -+ -+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts) -+{ -+ uint16_t index; -+ unsigned int pkt_len, data_len; 
-+ unsigned int nb_segs; -+ struct rte_mbuf *tx_pkt; -+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue; -+ struct enic *enic = vnic_dev_priv(wq->vdev); -+ unsigned short vlan_id; -+ unsigned short ol_flags; -+ unsigned int wq_desc_avail; -+ int head_idx; -+ struct vnic_wq_buf *buf; -+ unsigned int hw_ip_cksum_enabled; -+ unsigned int desc_count; -+ struct wq_enet_desc *descs, *desc_p, desc_tmp; -+ uint16_t mss; -+ uint8_t vlan_tag_insert; -+ uint8_t eop; -+ uint64_t bus_addr; -+ -+ enic_cleanup_wq(enic, wq); -+ wq_desc_avail = vnic_wq_desc_avail(wq); -+ head_idx = wq->head_idx; -+ desc_count = wq->ring.desc_count; -+ -+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX); -+ -+ hw_ip_cksum_enabled = enic->hw_ip_checksum; -+ for (index = 0; index < nb_pkts; index++) { -+ tx_pkt = *tx_pkts++; -+ nb_segs = tx_pkt->nb_segs; -+ if (nb_segs > wq_desc_avail) { -+ if (index > 0) -+ goto post; -+ goto done; -+ } -+ -+ pkt_len = tx_pkt->pkt_len; -+ data_len = tx_pkt->data_len; -+ vlan_id = tx_pkt->vlan_tci; -+ ol_flags = tx_pkt->ol_flags; -+ -+ mss = 0; -+ vlan_tag_insert = 0; -+ bus_addr = (dma_addr_t) -+ (tx_pkt->buf_physaddr + tx_pkt->data_off); -+ -+ descs = (struct wq_enet_desc *)wq->ring.descs; -+ desc_p = descs + head_idx; -+ -+ eop = (data_len == pkt_len); -+ -+ if (ol_flags & PKT_TX_VLAN_PKT) -+ vlan_tag_insert = 1; -+ -+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM)) -+ mss |= ENIC_CALC_IP_CKSUM; -+ -+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM)) -+ mss |= ENIC_CALC_TCP_UDP_CKSUM; -+ -+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop, -+ eop, 0, vlan_tag_insert, vlan_id, 0); -+ -+ *desc_p = desc_tmp; -+ buf = &wq->bufs[head_idx]; -+ buf->mb = (void *)tx_pkt; -+ head_idx = enic_ring_incr(desc_count, head_idx); -+ wq_desc_avail--; -+ -+ if (!eop) { -+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt = -+ tx_pkt->next) { -+ data_len = tx_pkt->data_len; -+ -+ if (tx_pkt->next == NULL) -+ eop = 1; -+ desc_p = descs + head_idx; -+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr -+ + tx_pkt->data_off); -+ wq_enet_desc_enc((struct wq_enet_desc *) -+ &desc_tmp, bus_addr, data_len, -+ mss, 0, 0, eop, eop, 0, -+ vlan_tag_insert, vlan_id, 0); -+ -+ *desc_p = desc_tmp; -+ buf = &wq->bufs[head_idx]; -+ buf->mb = (void *)tx_pkt; -+ head_idx = enic_ring_incr(desc_count, head_idx); -+ wq_desc_avail--; -+ } -+ } -+ } -+ post: -+ rte_wmb(); -+ iowrite32(head_idx, &wq->ctrl->posted_index); -+ done: -+ wq->ring.desc_avail = wq_desc_avail; -+ wq->head_idx = head_idx; -+ -+ return index; -+} --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch b/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch deleted file mode 100644 index e64ed590..00000000 --- a/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch +++ /dev/null @@ -1,128 +0,0 @@ -From 33f94cb41621f2816db702b6b104f4642eefa857 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Fri, 29 Apr 2016 19:51:35 +0200 -Subject: [PATCH 13/17] Revert "ixgbe: fix packet type from vector Rx" - -This reverts commit d9a2009a81089093645fea2e04b51dd37edf3e6f. 
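For reference, the reverted change (restored by the hunks below) works by AND-ing each 128-bit RX descriptor with a constant mask so that the RSS-type bits of the low dword are cleared before the shuffle mask copies the packet-type field into the mbuf. The stand-alone C sketch that follows illustrates only that masking step; it is not part of this patch, and the macro name DESC_LO_DWORD_MASK, the helper mask_rss_type(), the sample value, and the bit-field reading are assumptions taken from the hunk below rather than from the ixgbe datasheet.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed: mirrors desc_mask in the hunk below. Applied to the lowest
 * 32-bit lane of a descriptor it clears bits 0-3 (the "RSS type" nibble
 * per the hunk's comment) and bits 11-15, keeping bits 4-10 and the
 * upper half-word untouched. */
#define DESC_LO_DWORD_MASK 0xFFFF07F0u

static uint32_t mask_rss_type(uint32_t lo_dword)
{
	/* keep packet-type bits, drop RSS-type bits */
	return lo_dword & DESC_LO_DWORD_MASK;
}

int main(void)
{
	uint32_t raw = 0x123456EFu; /* made-up raw low descriptor dword */

	printf("raw 0x%08" PRIX32 " -> masked 0x%08" PRIX32 "\n",
	       raw, mask_rss_type(raw));
	return 0;
}

In the vector routine itself the same AND is applied four descriptors at a time with _mm_and_si128(), while the unmasked copies are kept in descs0[] for the ol_flags path, which is exactly what the restored lines below add back.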
---- - drivers/net/ixgbe/ixgbe_ethdev.c | 4 +++- - drivers/net/ixgbe/ixgbe_rxtx_vec.c | 34 +++++++++++++++++++++++----------- - 2 files changed, 26 insertions(+), 12 deletions(-) - -diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c -index 3f1ebc1..c48cb52 100644 ---- a/drivers/net/ixgbe/ixgbe_ethdev.c -+++ b/drivers/net/ixgbe/ixgbe_ethdev.c -@@ -3000,7 +3000,9 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) - if (dev->rx_pkt_burst == ixgbe_recv_pkts || - dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || - dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || -- dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) -+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc || -+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec || -+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) - return ptypes; - return NULL; - } -diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c -index 5040704..ccd93c7 100644 ---- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c -+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c -@@ -220,6 +220,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - 0, 0 /* ignore pkt_type field */ - ); - __m128i dd_check, eop_check; -+ __m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, -+ 0xFFFFFFFF, 0xFFFF07F0); - - /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ - nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); -@@ -257,8 +259,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - 13, 12, /* octet 12~13, 16 bits data_len */ - 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ - 13, 12, /* octet 12~13, low 16 bits pkt_len */ -- 0xFF, 0xFF, /* skip 32 bit pkt_type */ -- 0xFF, 0xFF -+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */ -+ 1, /* octet 1, 8 bits pkt_type field */ -+ 0 /* octet 0, 4 bits offset 4 pkt_type field */ - ); - - /* Cache is empty -> need to scan the buffer rings, but first move -@@ -275,6 +278,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; - pos += RTE_IXGBE_DESCS_PER_LOOP, - rxdp += RTE_IXGBE_DESCS_PER_LOOP) { -+ __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP]; - __m128i descs[RTE_IXGBE_DESCS_PER_LOOP]; - __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; - __m128i zero, staterr, sterr_tmp1, sterr_tmp2; -@@ -285,7 +289,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - - /* Read desc statuses backwards to avoid race condition */ - /* A.1 load 4 pkts desc */ -- descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); -+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); - - /* B.2 copy 2 mbuf point into rx_pkts */ - _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); -@@ -293,10 +297,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - /* B.1 load 1 mbuf point */ - mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); - -- descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); -+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); - /* B.1 load 2 mbuf point */ -- descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); -- descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); -+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); -+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp)); - - /* B.2 copy 2 mbuf point into rx_pkts */ - _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); -@@ -308,6 +312,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); - } - -+ /* A* mask out 0~3 
bits RSS type */ -+ descs[3] = _mm_and_si128(descs0[3], desc_mask); -+ descs[2] = _mm_and_si128(descs0[2], desc_mask); -+ -+ /* A* mask out 0~3 bits RSS type */ -+ descs[1] = _mm_and_si128(descs0[1], desc_mask); -+ descs[0] = _mm_and_si128(descs0[0], desc_mask); -+ - /* avoid compiler reorder optimization */ - rte_compiler_barrier(); - -@@ -315,22 +327,22 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); - pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); - -- /* D.1 pkt 1,2 convert format from desc to pktmbuf */ -- pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); -- pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); -- - /* C.1 4=>2 filter staterr info only */ - sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); - /* C.1 4=>2 filter staterr info only */ - sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); - - /* set ol_flags with vlan packet type */ -- desc_to_olflags_v(descs, &rx_pkts[pos]); -+ desc_to_olflags_v(descs0, &rx_pkts[pos]); - - /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ - pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); - pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); - -+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */ -+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); -+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); -+ - /* C.2 get 4 pkts staterr value */ - zero = _mm_xor_si128(dd_check, dd_check); - staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch b/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch deleted file mode 100644 index e510446b..00000000 --- a/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 6a7a9e52ed2ccfa86c2def3a66a368a5577f2fc2 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 3 May 2016 13:56:05 -0700 -Subject: [PATCH] enic: Set PKT_RX_VLAN_PKT iff returned packet has VLAN tag - -Only set the ol_flags PKT_RX_VLAN_PKT bit if the packet being passed -to the application contains a VLAN tag. This is true whether -stripping is enabled or disabled. - -This area of the API is in flux, so behaviour may change in the -future. - -Signed-off-by: John Daley ---- - drivers/net/enic/enic_rxtx.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 02b54dd..6a95389 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -206,12 +206,15 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) - if (unlikely(!enic_cq_rx_desc_eop(ciflags))) - goto mbuf_flags_done; - -- /* VLAN stripping */ -+ /* VLAN stripping. 
Set PKT_RX_VLAN_PKT only if there is a vlan tag -+ * in the packet passed up -+ */ - if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -- pkt_flags |= PKT_RX_VLAN_PKT; - mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); - } else { - mbuf->vlan_tci = 0; -+ if (enic_cq_rx_desc_vlan(cqrd)) -+ pkt_flags |= PKT_RX_VLAN_PKT; - } - - /* RSS flag */ --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch b/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch deleted file mode 100644 index 721fd107..00000000 --- a/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch +++ /dev/null @@ -1,165 +0,0 @@ -From 30a3d6e23880094edfc51b49b11099c8b8bfa8cd Mon Sep 17 00:00:00 2001 -From: John Lo -Date: Tue, 7 Jun 2016 12:36:23 +0200 -Subject: [PATCH 15/17] ENIC counter improvement - ---- - drivers/net/enic/enic.h | 7 +++++++ - drivers/net/enic/enic_main.c | 38 ++++++++++++++++++++++++++++++++++---- - drivers/net/enic/enic_rxtx.c | 15 +++++++-------- - 3 files changed, 48 insertions(+), 12 deletions(-) - -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 43b82a6..7c1b5c9 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -91,6 +91,11 @@ struct enic_fdir { - struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX]; - }; - -+struct enic_soft_stats { -+ rte_atomic64_t rx_nombuf; -+ rte_atomic64_t rx_packet_errors; -+}; -+ - /* Per-instance private data structure */ - struct enic { - struct enic *next; -@@ -133,6 +138,8 @@ struct enic { - /* interrupt resource */ - struct vnic_intr intr; - unsigned int intr_count; -+ -+ struct enic_soft_stats soft_stats; - }; - - static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 9bfdec1..a00565a 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -142,22 +142,51 @@ static void enic_log_q_error(struct enic *enic) - } - - -+static void enic_clear_soft_stats(struct enic *enic) -+{ -+ struct enic_soft_stats *soft_stats = &enic->soft_stats; -+ rte_atomic64_clear(&soft_stats->rx_nombuf); -+ rte_atomic64_clear(&soft_stats->rx_packet_errors); -+} -+ -+static void enic_init_soft_stats(struct enic *enic) -+{ -+ struct enic_soft_stats *soft_stats = &enic->soft_stats; -+ rte_atomic64_init(&soft_stats->rx_nombuf); -+ rte_atomic64_init(&soft_stats->rx_packet_errors); -+ enic_clear_soft_stats(enic); -+} -+ - void enic_dev_stats_clear(struct enic *enic) - { - if (vnic_dev_stats_clear(enic->vdev)) - dev_err(enic, "Error in clearing stats\n"); -+ enic_clear_soft_stats(enic); - } - - void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - { - struct vnic_stats *stats; -+ struct enic_soft_stats *soft_stats = &enic->soft_stats; -+ int64_t rx_truncated; -+ uint64_t rx_packet_errors; - - if (vnic_dev_stats_dump(enic->vdev, &stats)) { - dev_err(enic, "Error in getting stats\n"); - return; - } - -- r_stats->ipackets = stats->rx.rx_frames_ok; -+ /* The number of truncated packets can only be calculated by -+ * subtracting a hardware counter from error packets received by -+ * the driver. Note: this causes transient inaccuracies in the -+ * ipackets count. Also, the length of truncated packets are -+ * counted in ibytes even though truncated packets are dropped -+ * which can make ibytes be slightly higher than it should be. 
-+ */ -+ rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors); -+ rx_truncated = rx_packet_errors - stats->rx.rx_errors; -+ -+ r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated; - r_stats->opackets = stats->tx.tx_frames_ok; - - r_stats->ibytes = stats->rx.rx_bytes_ok; -@@ -166,10 +195,9 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; - r_stats->oerrors = stats->tx.tx_errors; - -- r_stats->imissed = stats->rx.rx_no_bufs; -+ r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated; - -- r_stats->imcasts = stats->rx.rx_multicast_frames_ok; -- r_stats->rx_nombuf = stats->rx.rx_no_bufs; -+ r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); - } - - void enic_del_mac_address(struct enic *enic) -@@ -755,6 +783,8 @@ int enic_setup_finish(struct enic *enic) - { - int ret; - -+ enic_init_soft_stats(enic); -+ - ret = enic_set_rss_nic_cfg(enic); - if (ret) { - dev_err(enic, "Failed to config nic, aborting.\n"); -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 138dfb8..174486b 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -251,6 +251,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - struct vnic_cq *cq; - volatile struct cq_desc *cqd_ptr; - uint8_t color; -+ uint16_t nb_err = 0; - - cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -@@ -278,10 +279,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - /* allocate a new mbuf */ - nmb = rte_rxmbuf_alloc(rq->mp); - if (nmb == NULL) { -- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -- enic->port_id, (unsigned)rq->index); -- rte_eth_devices[enic->port_id]. 
-- data->rx_mbuf_alloc_failed++; -+ rte_atomic64_inc(&enic->soft_stats.rx_nombuf); - break; - } - -@@ -323,9 +321,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - enic_cq_rx_to_pkt_flags(&cqd, rxmb); - } else { -- rxmb->pkt_len = 0; -- rxmb->packet_type = 0; -- rxmb->ol_flags = 0; -+ rte_pktmbuf_free(rxmb); -+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); -+ nb_err++; -+ continue; - } - rxmb->data_len = rxmb->pkt_len; - -@@ -337,7 +336,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rx_pkts[nb_rx++] = rxmb; - } - -- nb_hold += nb_rx; -+ nb_hold += nb_rx + nb_err; - cq->to_clean = rx_id; - - if (nb_hold > rq->rx_free_thresh) { --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch b/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch deleted file mode 100644 index e0daab06..00000000 --- a/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch +++ /dev/null @@ -1,672 +0,0 @@ -From f03d5a02fc2b3cc24bf059a273ea1473cdb9993b Mon Sep 17 00:00:00 2001 -From: John Lo -Date: Tue, 7 Jun 2016 12:40:07 +0200 -Subject: [PATCH 16/17] ENIC scatter RX - ---- - drivers/net/enic/base/rq_enet_desc.h | 2 +- - drivers/net/enic/base/vnic_rq.c | 12 +- - drivers/net/enic/base/vnic_rq.h | 18 ++- - drivers/net/enic/enic.h | 10 ++ - drivers/net/enic/enic_main.c | 236 +++++++++++++++++++++++++++-------- - drivers/net/enic/enic_rxtx.c | 139 ++++++++++++++------- - 6 files changed, 313 insertions(+), 104 deletions(-) - -diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h -index 7292d9d..13e24b4 100644 ---- a/drivers/net/enic/base/rq_enet_desc.h -+++ b/drivers/net/enic/base/rq_enet_desc.h -@@ -55,7 +55,7 @@ enum rq_enet_type_types { - #define RQ_ENET_TYPE_BITS 2 - #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) - --static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, -+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc, - u64 address, u8 type, u16 length) - { - desc->address = cpu_to_le64(address); -diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c -index cb62c5e..d97f93e 100644 ---- a/drivers/net/enic/base/vnic_rq.c -+++ b/drivers/net/enic/base/vnic_rq.c -@@ -84,11 +84,16 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, - iowrite32(cq_index, &rq->ctrl->cq_index); - iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); - iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); -- iowrite32(0, &rq->ctrl->dropped_packet_count); - iowrite32(0, &rq->ctrl->error_status); - iowrite32(fetch_index, &rq->ctrl->fetch_index); - iowrite32(posted_index, &rq->ctrl->posted_index); -- -+ if (rq->is_sop) { -+// printf("Writing 0x%x to %s rq\n", -+// ((rq->is_sop << 10) | rq->data_queue_idx), -+// rq->is_sop ? 
"sop":"data"); -+ iowrite32(((rq->is_sop << 10) | rq->data_queue_idx), -+ &rq->ctrl->data_ring); -+ } - } - - void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, -@@ -96,6 +101,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, - unsigned int error_interrupt_offset) - { - u32 fetch_index = 0; -+ - /* Use current fetch_index as the ring starting point */ - fetch_index = ioread32(&rq->ctrl->fetch_index); - -@@ -110,6 +116,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, - error_interrupt_offset); - rq->rxst_idx = 0; - rq->tot_pkts = 0; -+ rq->pkt_first_seg = NULL; -+ rq->pkt_last_seg = NULL; - } - - void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error) -diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h -index 424415c..d1e2f52 100644 ---- a/drivers/net/enic/base/vnic_rq.h -+++ b/drivers/net/enic/base/vnic_rq.h -@@ -60,10 +60,18 @@ struct vnic_rq_ctrl { - u32 pad7; - u32 error_status; /* 0x48 */ - u32 pad8; -- u32 dropped_packet_count; /* 0x50 */ -+ u32 tcp_sn; /* 0x50 */ - u32 pad9; -- u32 dropped_packet_count_rc; /* 0x58 */ -+ u32 unused; /* 0x58 */ - u32 pad10; -+ u32 dca_select; /* 0x60 */ -+ u32 pad11; -+ u32 dca_value; /* 0x68 */ -+ u32 pad12; -+ u32 data_ring; /* 0x70 */ -+ u32 pad13; -+ u32 header_split; /* 0x78 */ -+ u32 pad14; - }; - - struct vnic_rq { -@@ -82,6 +90,12 @@ struct vnic_rq { - struct rte_mempool *mp; - uint16_t rxst_idx; - uint32_t tot_pkts; -+ uint16_t data_queue_idx; -+ uint8_t is_sop; -+ uint8_t in_use; -+ struct rte_mbuf *pkt_first_seg; -+ struct rte_mbuf *pkt_last_seg; -+ unsigned int max_mbufs_per_pkt; - }; - - static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 7c1b5c9..d2de6ee 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -142,6 +142,16 @@ struct enic { - struct enic_soft_stats soft_stats; - }; - -+static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq) -+{ -+ return rq * 2; -+} -+ -+static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned int rq) -+{ -+ return rq * 2 + 1; -+} -+ - static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) - { - return rq; -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index a00565a..be17707 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -247,15 +247,23 @@ void enic_init_vnic_resources(struct enic *enic) - unsigned int error_interrupt_offset = 0; - unsigned int index = 0; - unsigned int cq_idx; -+ struct vnic_rq *data_rq; - - vnic_dev_stats_clear(enic->vdev); - - for (index = 0; index < enic->rq_count; index++) { -- vnic_rq_init(&enic->rq[index], -+ vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)], - enic_cq_rq(enic, index), - error_interrupt_enable, - error_interrupt_offset); - -+ data_rq = &enic->rq[enic_data_rq(enic, index)]; -+ if (data_rq->in_use) -+ vnic_rq_init(data_rq, -+ enic_cq_rq(enic, index), -+ error_interrupt_enable, -+ error_interrupt_offset); -+ - cq_idx = enic_cq_rq(enic, index); - vnic_cq_init(&enic->cq[cq_idx], - 0 /* flow_control_enable */, -@@ -305,6 +313,9 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - unsigned i; - dma_addr_t dma_addr; - -+ if (!rq->in_use) -+ return 0; -+ - dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, - rq->ring.desc_count); - -@@ -316,20 +327,20 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq 
*rq) - return -ENOMEM; - } - -- dma_addr = (dma_addr_t)(mb->buf_physaddr -- + RTE_PKTMBUF_HEADROOM); -- -- rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, -- mb->buf_len - RTE_PKTMBUF_HEADROOM); -+ dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM); -+ rq_enet_desc_enc(rqd, dma_addr, -+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP -+ : RQ_ENET_TYPE_NOT_SOP), -+ mb->buf_len - RTE_PKTMBUF_HEADROOM); - rq->mbuf_ring[i] = mb; - } - - /* make sure all prior writes are complete before doing the PIO write */ - rte_rmb(); - -- /* Post all but the last 2 cache lines' worth of descriptors */ -- rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE -- / sizeof(struct rq_enet_desc)); -+ /* Post all but the last buffer to VIC. */ -+ rq->posted_index = rq->ring.desc_count - 1; -+ - rq->rx_nb_hold = 0; - - dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", -@@ -337,6 +348,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - iowrite32(rq->posted_index, &rq->ctrl->posted_index); - rte_rmb(); - -+// printf("posted %d buffers to %s rq\n", rq->ring.desc_count, -+// rq->is_sop ? "sop" : "data"); - return 0; - - } -@@ -398,17 +411,25 @@ int enic_enable(struct enic *enic) - "Flow director feature will not work\n"); - - for (index = 0; index < enic->rq_count; index++) { -- err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]); -+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_sop_rq(enic, index)]); - if (err) { -- dev_err(enic, "Failed to alloc RX queue mbufs\n"); -+ dev_err(enic, "Failed to alloc sop RX queue mbufs\n"); -+ return err; -+ } -+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_data_rq(enic, index)]); -+ if (err) { -+ /* release the previously allocated mbufs for the sop rq */ -+ enic_rxmbuf_queue_release(enic, &enic->rq[enic_sop_rq(enic, index)]); -+ -+ dev_err(enic, "Failed to alloc data RX queue mbufs\n"); - return err; - } - } - - for (index = 0; index < enic->wq_count; index++) -- vnic_wq_enable(&enic->wq[index]); -+ enic_start_wq(enic, index); - for (index = 0; index < enic->rq_count; index++) -- vnic_rq_enable(&enic->rq[index]); -+ enic_start_rq(enic, index); - - vnic_dev_enable_wait(enic->vdev); - -@@ -440,14 +461,26 @@ int enic_alloc_intr_resources(struct enic *enic) - - void enic_free_rq(void *rxq) - { -- struct vnic_rq *rq = (struct vnic_rq *)rxq; -- struct enic *enic = vnic_dev_priv(rq->vdev); -+ struct vnic_rq *rq_sop = (struct vnic_rq *)rxq; -+ struct enic *enic = vnic_dev_priv(rq_sop->vdev); -+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; - -- enic_rxmbuf_queue_release(enic, rq); -- rte_free(rq->mbuf_ring); -- rq->mbuf_ring = NULL; -- vnic_rq_free(rq); -- vnic_cq_free(&enic->cq[rq->index]); -+ enic_rxmbuf_queue_release(enic, rq_sop); -+ if (rq_data->in_use) -+ enic_rxmbuf_queue_release(enic, rq_data); -+ -+ rte_free(rq_sop->mbuf_ring); -+ if (rq_data->in_use) -+ rte_free(rq_data->mbuf_ring); -+ -+ rq_sop->mbuf_ring = NULL; -+ rq_data->mbuf_ring = NULL; -+ -+ vnic_rq_free(rq_sop); -+ if (rq_data->in_use) -+ vnic_rq_free(rq_data); -+ -+ vnic_cq_free(&enic->cq[rq_sop->index]); - } - - void enic_start_wq(struct enic *enic, uint16_t queue_idx) -@@ -462,12 +495,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx) - - void enic_start_rq(struct enic *enic, uint16_t queue_idx) - { -- vnic_rq_enable(&enic->rq[queue_idx]); -+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)]; -+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; -+ -+ if (rq_data->in_use) -+ 
vnic_rq_enable(rq_data); -+ rte_mb(); -+ vnic_rq_enable(rq_sop); -+ - } - - int enic_stop_rq(struct enic *enic, uint16_t queue_idx) - { -- return vnic_rq_disable(&enic->rq[queue_idx]); -+ int ret1 = 0, ret2 = 0; -+ -+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)]; -+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; -+ -+ ret2 = vnic_rq_disable(rq_sop); -+ rte_mb(); -+ if (rq_data->in_use) -+ ret1 = vnic_rq_disable(rq_data); -+ -+ if (ret2) -+ return ret2; -+ else -+ return ret1; - } - - int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, -@@ -475,53 +528,128 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - uint16_t nb_desc) - { - int rc; -- struct vnic_rq *rq = &enic->rq[queue_idx]; -- -- rq->socket_id = socket_id; -- rq->mp = mp; -+ uint16_t sop_queue_idx = enic_sop_rq(enic, queue_idx); -+ uint16_t data_queue_idx = enic_data_rq(enic, queue_idx); -+ struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx]; -+ struct vnic_rq *rq_data = &enic->rq[data_queue_idx]; -+ unsigned int mbuf_size, mbufs_per_pkt; -+ unsigned int nb_sop_desc, nb_data_desc; -+ uint16_t min_sop, max_sop, min_data, max_data; -+ -+ rq_sop->is_sop = 1; -+ rq_sop->data_queue_idx = data_queue_idx; -+ rq_data->is_sop = 0; -+ rq_data->data_queue_idx = 0; -+ rq_sop->socket_id = socket_id; -+ rq_sop->mp = mp; -+ rq_data->socket_id = socket_id; -+ rq_data->mp = mp; -+ rq_sop->in_use = 1; -+ -+ mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); -+ -+ /* ceil(mtu/mbuf_size) */ -+ mbufs_per_pkt = (enic->config.mtu + (mbuf_size - 1)) / mbuf_size; -+ -+ if (mbufs_per_pkt > 1) -+ rq_data->in_use = 1; -+ else -+ rq_data->in_use = 0; -+ -+ /* number of descriptors have to be a multiple of 32 */ -+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F; -+ nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F; -+ -+ rq_sop->max_mbufs_per_pkt = mbufs_per_pkt; -+ rq_data->max_mbufs_per_pkt = mbufs_per_pkt; -+ -+ //printf("mtu = %u, mbuf_size = %u, mbuf_per_pkt = %u\n", -+ // enic->config.mtu, mbuf_size, mbufs_per_pkt); -+ -+ if (mbufs_per_pkt > 1) { -+ min_sop = 64; -+ max_sop = ((enic->config.rq_desc_count / (mbufs_per_pkt - 1)) & ~0x1F); -+ min_data = min_sop * (mbufs_per_pkt - 1); -+ max_data = enic->config.rq_desc_count; -+ } else { -+ min_sop = 64; -+ max_sop = enic->config.rq_desc_count; -+ min_data = 0; -+ max_data = 0; -+ } - -- if (nb_desc) { -- if (nb_desc > enic->config.rq_desc_count) { -- dev_warning(enic, -- "RQ %d - number of rx desc in cmd line (%d)"\ -- "is greater than that in the UCSM/CIMC adapter"\ -- "policy. 
Applying the value in the adapter "\ -- "policy (%d).\n", -- queue_idx, nb_desc, enic->config.rq_desc_count); -- nb_desc = enic->config.rq_desc_count; -- } -- dev_info(enic, "RX Queues - effective number of descs:%d\n", -- nb_desc); -+ if (nb_desc < (min_sop + min_data)) { -+ dev_warning(enic, -+ "Number of rx descs too low, adjusting to minimum\n"); -+ nb_sop_desc = min_sop; -+ nb_data_desc = min_data; -+ } else if (nb_desc > (max_sop + max_data)){ -+ dev_warning(enic, -+ "Number of rx_descs too high, adjusting to maximum\n"); -+ nb_sop_desc = max_sop; -+ nb_data_desc = max_data; - } -+ dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n", -+ enic->config.mtu, mbuf_size, min_sop + min_data, max_sop + max_data); - -- /* Allocate queue resources */ -- rc = vnic_rq_alloc(enic->vdev, rq, queue_idx, -- nb_desc, sizeof(struct rq_enet_desc)); -+ dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", -+ nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc); -+ -+ /* Allocate sop queue resources */ -+ rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx, -+ nb_sop_desc, sizeof(struct rq_enet_desc)); - if (rc) { -- dev_err(enic, "error in allocation of rq\n"); -+ dev_err(enic, "error in allocation of sop rq\n"); - goto err_exit; - } -- -+ nb_sop_desc = rq_sop->ring.desc_count; -+ -+ if (rq_data->in_use) { -+ /* Allocate data queue resources */ -+ rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx, -+ nb_data_desc, -+ sizeof(struct rq_enet_desc)); -+ if (rc) { -+ dev_err(enic, "error in allocation of data rq\n"); -+ goto err_free_rq_sop; -+ } -+ nb_data_desc = rq_data->ring.desc_count; -+ } - rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, -- socket_id, nb_desc, -- sizeof(struct cq_enet_rq_desc)); -+ socket_id, nb_sop_desc + nb_data_desc, -+ sizeof(struct cq_enet_rq_desc)); - if (rc) { - dev_err(enic, "error in allocation of cq for rq\n"); -- goto err_free_rq_exit; -+ goto err_free_rq_data; - } - -- /* Allocate the mbuf ring */ -- rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", -- sizeof(struct rte_mbuf *) * nb_desc, -- RTE_CACHE_LINE_SIZE, rq->socket_id); -+ /* Allocate the mbuf rings */ -+ rq_sop->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", -+ sizeof(struct rte_mbuf *) * nb_sop_desc, -+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id); -+ if (rq_sop->mbuf_ring == NULL) -+ goto err_free_cq; -+ -+ if (rq_data->in_use) { -+ rq_data->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", -+ sizeof(struct rte_mbuf *) * nb_data_desc, -+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id); -+ if (rq_data->mbuf_ring == NULL) -+ goto err_free_sop_mbuf; -+ } - -- if (rq->mbuf_ring != NULL) -- return 0; -+ return 0; - -+err_free_sop_mbuf: -+ rte_free(rq_sop->mbuf_ring); -+err_free_cq: - /* cleanup on error */ - vnic_cq_free(&enic->cq[queue_idx]); --err_free_rq_exit: -- vnic_rq_free(rq); -+err_free_rq_data: -+ if (rq_data->in_use) -+ vnic_rq_free(rq_data); -+err_free_rq_sop: -+ vnic_rq_free(rq_sop); - err_exit: - return -ENOMEM; - } -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 174486b..463b954 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -242,22 +242,27 @@ uint16_t - enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) - { -- struct vnic_rq *rq = rx_queue; -- struct enic *enic = vnic_dev_priv(rq->vdev); -- unsigned int rx_id; -+ struct vnic_rq *sop_rq = rx_queue; -+ struct vnic_rq *data_rq; -+ struct vnic_rq *rq; 
-+ struct enic *enic = vnic_dev_priv(sop_rq->vdev); -+ uint16_t cq_idx; -+ uint16_t rq_idx; -+ uint16_t rq_num; - struct rte_mbuf *nmb, *rxmb; - uint16_t nb_rx = 0; -- uint16_t nb_hold; - struct vnic_cq *cq; - volatile struct cq_desc *cqd_ptr; - uint8_t color; -- uint16_t nb_err = 0; -+ uint16_t seg_length; -+ struct rte_mbuf *first_seg = sop_rq->pkt_first_seg; -+ struct rte_mbuf *last_seg = sop_rq->pkt_last_seg; - -- cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; -+ cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; - -- nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -+ data_rq = &enic->rq[sop_rq->data_queue_idx]; - - while (nb_rx < nb_pkts) { - volatile struct rq_enet_desc *rqd_ptr; -@@ -265,6 +270,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - struct cq_desc cqd; - uint64_t ol_err_flags; - uint8_t packet_error; -+ uint16_t ciflags; - - /* Check for pkts available */ - color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -@@ -272,9 +278,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - if (color == cq->last_color) - break; - -- /* Get the cq descriptor and rq pointer */ -+ /* Get the cq descriptor and extract rq info from it */ - cqd = *cqd_ptr; -- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -+ rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK; -+ rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK; -+ -+ rq = &enic->rq[rq_num]; -+ rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx; - - /* allocate a new mbuf */ - nmb = rte_rxmbuf_alloc(rq->mp); -@@ -287,67 +297,106 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); - - /* Get the mbuf to return and replace with one just allocated */ -- rxmb = rq->mbuf_ring[rx_id]; -- rq->mbuf_ring[rx_id] = nmb; -+ rxmb = rq->mbuf_ring[rq_idx]; -+ rq->mbuf_ring[rq_idx] = nmb; - - /* Increment cqd, rqd, mbuf_table index */ -- rx_id++; -- if (unlikely(rx_id == rq->ring.desc_count)) { -- rx_id = 0; -+ cq_idx++; -+ if (unlikely(cq_idx == cq->ring.desc_count)) { -+ cq_idx = 0; - cq->last_color = cq->last_color ? 0 : 1; - } - - /* Prefetch next mbuf & desc while processing current one */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; - rte_enic_prefetch(cqd_ptr); -- rte_enic_prefetch(rq->mbuf_ring[rx_id]); -- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -- + rx_id); -+// rte_enic_prefetch(rq->mbuf_ring[rx_id]); -+// rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -+// + rx_id); -+ -+ ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *) &cqd); - - /* Push descriptor for newly allocated mbuf */ -- dma_addr = (dma_addr_t)(nmb->buf_physaddr -- + RTE_PKTMBUF_HEADROOM); -- rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -- - RTE_PKTMBUF_HEADROOM); -+ -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM); -+ rq_enet_desc_enc(rqd_ptr, dma_addr, -+ (rq->is_sop ? 
RQ_ENET_TYPE_ONLY_SOP -+ : RQ_ENET_TYPE_NOT_SOP), -+ nmb->buf_len - RTE_PKTMBUF_HEADROOM); - - /* Fill in the rest of the mbuf */ -- rxmb->data_off = RTE_PKTMBUF_HEADROOM; -- rxmb->nb_segs = 1; -+ seg_length = enic_cq_rx_desc_n_bytes(&cqd); -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -+ enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ if (rq->is_sop) { -+ first_seg = rxmb; -+ first_seg->nb_segs = 1; -+ first_seg->pkt_len = seg_length; -+ } else { -+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len -+ + seg_length); -+ first_seg->nb_segs++; -+ last_seg->next = rxmb; -+ } -+ - rxmb->next = NULL; - rxmb->port = enic->port_id; -- if (!packet_error) { -- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -- enic_cq_rx_to_pkt_flags(&cqd, rxmb); -- } else { -- rte_pktmbuf_free(rxmb); -+ rxmb->data_len = seg_length; -+ -+ rq->rx_nb_hold++; -+ -+ if (!(enic_cq_rx_desc_eop(ciflags))) { -+ last_seg = rxmb; -+ continue; -+ } -+ -+ if (unlikely(packet_error)) { -+ rte_pktmbuf_free(first_seg); - rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); -- nb_err++; -+ - continue; - } -- rxmb->data_len = rxmb->pkt_len; -+ -+ -+// printf("EOP: final packet length is %d\n", first_seg->pkt_len); -+// rte_pktmbuf_dump(stdout, first_seg, 64); - - /* prefetch mbuf data for caller */ -- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -+ rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr, - RTE_PKTMBUF_HEADROOM)); - - /* store the mbuf address into the next entry of the array */ -- rx_pkts[nb_rx++] = rxmb; -+ rx_pkts[nb_rx++] = first_seg; - } - -- nb_hold += nb_rx + nb_err; -- cq->to_clean = rx_id; -+ sop_rq->pkt_first_seg = first_seg; -+ sop_rq->pkt_last_seg = last_seg; -+ -+ cq->to_clean = cq_idx; -+ -+ if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > sop_rq->rx_free_thresh) { -+ if (data_rq->in_use) { -+ data_rq->posted_index = enic_ring_add(data_rq->ring.desc_count, -+ data_rq->posted_index, -+ data_rq->rx_nb_hold); -+ //printf("Processed %d data descs. Posted index now %d\n", -+ // data_rq->rx_nb_hold, data_rq->posted_index); -+ data_rq->rx_nb_hold = 0; -+ } -+ sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count, -+ sop_rq->posted_index, -+ sop_rq->rx_nb_hold); -+ //printf("Processed %d sop descs. Posted index now %d\n", -+ // sop_rq->rx_nb_hold, sop_rq->posted_index); -+ sop_rq->rx_nb_hold = 0; - -- if (nb_hold > rq->rx_free_thresh) { -- rq->posted_index = enic_ring_add(rq->ring.desc_count, -- rq->posted_index, nb_hold); -- nb_hold = 0; - rte_mb(); -- iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ if (data_rq->in_use) -+ iowrite32(data_rq->posted_index, &data_rq->ctrl->posted_index); -+ rte_compiler_barrier(); -+ iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index); - } - -- rq->rx_nb_hold = nb_hold; - - return nb_rx; - } --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch b/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch deleted file mode 100644 index 2553997c..00000000 --- a/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch +++ /dev/null @@ -1,40404 +0,0 @@ -From b8d83a0825f2d7d0d626c00f79de7b415f8dc344 Mon Sep 17 00:00:00 2001 -From: Sachin Saxena -Date: Fri, 17 Jun 2016 12:32:28 +0530 -Subject: [PATCH 17/17] NXP DPAA2 Poll Mode Driver Support - - Upstreaming of DPAA2 driver changes is in progress.This patch will - temporary add the support in VPP in built DPDK. - - Two types of changes: - 1. Driver specfic independent files. 
No impact on any other functionality. - 2. Changes in common EAL framework. These changes are done in compile time DPAA2 - specific flag, so no impact is expected on other existing features if not - compiling for DPAA2. - -Signed-off-by: Sachin Saxena ---- - config/defconfig_arm64-dpaa2-linuxapp-gcc | 61 + - drivers/net/Makefile | 1 + - drivers/net/dpaa2/Makefile | 102 + - drivers/net/dpaa2/dpaa2_logs.h | 77 + - drivers/net/dpaa2/mc/dpaiop.c | 457 ++++ - drivers/net/dpaa2/mc/dpbp.c | 432 ++++ - drivers/net/dpaa2/mc/dpci.c | 501 ++++ - drivers/net/dpaa2/mc/dpcon.c | 401 +++ - drivers/net/dpaa2/mc/dpdbg.c | 547 +++++ - drivers/net/dpaa2/mc/dpdcei.c | 449 ++++ - drivers/net/dpaa2/mc/dpdmai.c | 452 ++++ - drivers/net/dpaa2/mc/dpdmux.c | 567 +++++ - drivers/net/dpaa2/mc/dpio.c | 468 ++++ - drivers/net/dpaa2/mc/dpmac.c | 422 ++++ - drivers/net/dpaa2/mc/dpmcp.c | 312 +++ - drivers/net/dpaa2/mc/dpmng.c | 58 + - drivers/net/dpaa2/mc/dpni.c | 1907 +++++++++++++++ - drivers/net/dpaa2/mc/dprc.c | 786 ++++++ - drivers/net/dpaa2/mc/dprtc.c | 509 ++++ - drivers/net/dpaa2/mc/dpseci.c | 502 ++++ - drivers/net/dpaa2/mc/dpsw.c | 1639 +++++++++++++ - drivers/net/dpaa2/mc/fsl_dpaiop.h | 494 ++++ - drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h | 190 ++ - drivers/net/dpaa2/mc/fsl_dpbp.h | 438 ++++ - drivers/net/dpaa2/mc/fsl_dpbp_cmd.h | 172 ++ - drivers/net/dpaa2/mc/fsl_dpci.h | 594 +++++ - drivers/net/dpaa2/mc/fsl_dpci_cmd.h | 200 ++ - drivers/net/dpaa2/mc/fsl_dpcon.h | 407 +++ - drivers/net/dpaa2/mc/fsl_dpcon_cmd.h | 162 ++ - drivers/net/dpaa2/mc/fsl_dpdbg.h | 635 +++++ - drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h | 249 ++ - drivers/net/dpaa2/mc/fsl_dpdcei.h | 515 ++++ - drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h | 182 ++ - drivers/net/dpaa2/mc/fsl_dpdmai.h | 521 ++++ - drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h | 191 ++ - drivers/net/dpaa2/mc/fsl_dpdmux.h | 724 ++++++ - drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 256 ++ - drivers/net/dpaa2/mc/fsl_dpio.h | 460 ++++ - drivers/net/dpaa2/mc/fsl_dpio_cmd.h | 184 ++ - drivers/net/dpaa2/mc/fsl_dpkg.h | 174 ++ - drivers/net/dpaa2/mc/fsl_dpmac.h | 593 +++++ - drivers/net/dpaa2/mc/fsl_dpmac_cmd.h | 195 ++ - drivers/net/dpaa2/mc/fsl_dpmcp.h | 332 +++ - drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h | 135 + - drivers/net/dpaa2/mc/fsl_dpmng.h | 74 + - drivers/net/dpaa2/mc/fsl_dpmng_cmd.h | 46 + - drivers/net/dpaa2/mc/fsl_dpni.h | 2581 ++++++++++++++++++++ - drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1058 ++++++++ - drivers/net/dpaa2/mc/fsl_dprc.h | 1032 ++++++++ - drivers/net/dpaa2/mc/fsl_dprc_cmd.h | 755 ++++++ - drivers/net/dpaa2/mc/fsl_dprtc.h | 434 ++++ - drivers/net/dpaa2/mc/fsl_dprtc_cmd.h | 181 ++ - drivers/net/dpaa2/mc/fsl_dpseci.h | 647 +++++ - drivers/net/dpaa2/mc/fsl_dpseci_cmd.h | 241 ++ - drivers/net/dpaa2/mc/fsl_dpsw.h | 2164 ++++++++++++++++ - drivers/net/dpaa2/mc/fsl_dpsw_cmd.h | 916 +++++++ - drivers/net/dpaa2/mc/fsl_mc_cmd.h | 221 ++ - drivers/net/dpaa2/mc/fsl_mc_sys.h | 95 + - drivers/net/dpaa2/mc/fsl_net.h | 480 ++++ - drivers/net/dpaa2/mc/mc_sys.c | 129 + - drivers/net/dpaa2/qbman/driver/qbman_debug.c | 926 +++++++ - drivers/net/dpaa2/qbman/driver/qbman_debug.h | 140 ++ - drivers/net/dpaa2/qbman/driver/qbman_portal.c | 1407 +++++++++++ - drivers/net/dpaa2/qbman/driver/qbman_portal.h | 266 ++ - drivers/net/dpaa2/qbman/driver/qbman_private.h | 165 ++ - drivers/net/dpaa2/qbman/driver/qbman_sys.h | 367 +++ - drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h | 68 + - drivers/net/dpaa2/qbman/include/compat.h | 597 +++++ - .../dpaa2/qbman/include/drivers/fsl_qbman_base.h | 151 ++ - 
.../dpaa2/qbman/include/drivers/fsl_qbman_portal.h | 1089 +++++++++ - drivers/net/dpaa2/rte_eth_dpaa2_pvt.h | 313 +++ - drivers/net/dpaa2/rte_eth_dpbp.c | 430 ++++ - drivers/net/dpaa2/rte_eth_dpio.c | 339 +++ - drivers/net/dpaa2/rte_eth_dpni.c | 2230 +++++++++++++++++ - drivers/net/dpaa2/rte_eth_dpni_annot.h | 311 +++ - drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 4 + - lib/librte_eal/common/eal_private.h | 12 + - lib/librte_eal/linuxapp/eal/Makefile | 11 + - lib/librte_eal/linuxapp/eal/eal.c | 10 + - lib/librte_eal/linuxapp/eal/eal_soc.c | 84 + - lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c | 653 +++++ - lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h | 102 + - lib/librte_mbuf/Makefile | 4 + - lib/librte_mbuf/rte_mbuf.c | 67 + - lib/librte_mempool/Makefile | 4 + - lib/librte_mempool/rte_mempool.c | 13 + - lib/librte_mempool/rte_mempool.h | 30 +- - mk/machine/dpaa2/rte.vars.mk | 60 + - mk/rte.app.mk | 1 + - 89 files changed, 39560 insertions(+), 1 deletion(-) - create mode 100644 config/defconfig_arm64-dpaa2-linuxapp-gcc - create mode 100644 drivers/net/dpaa2/Makefile - create mode 100644 drivers/net/dpaa2/dpaa2_logs.h - create mode 100644 drivers/net/dpaa2/mc/dpaiop.c - create mode 100644 drivers/net/dpaa2/mc/dpbp.c - create mode 100644 drivers/net/dpaa2/mc/dpci.c - create mode 100644 drivers/net/dpaa2/mc/dpcon.c - create mode 100644 drivers/net/dpaa2/mc/dpdbg.c - create mode 100644 drivers/net/dpaa2/mc/dpdcei.c - create mode 100644 drivers/net/dpaa2/mc/dpdmai.c - create mode 100644 drivers/net/dpaa2/mc/dpdmux.c - create mode 100644 drivers/net/dpaa2/mc/dpio.c - create mode 100644 drivers/net/dpaa2/mc/dpmac.c - create mode 100644 drivers/net/dpaa2/mc/dpmcp.c - create mode 100644 drivers/net/dpaa2/mc/dpmng.c - create mode 100644 drivers/net/dpaa2/mc/dpni.c - create mode 100644 drivers/net/dpaa2/mc/dprc.c - create mode 100644 drivers/net/dpaa2/mc/dprtc.c - create mode 100644 drivers/net/dpaa2/mc/dpseci.c - create mode 100644 drivers/net/dpaa2/mc/dpsw.c - create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpci.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpci_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpio.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpio_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpkg.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpni.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpni_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprc.h - 
create mode 100644 drivers/net/dpaa2/mc/fsl_dprc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_mc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_mc_sys.h - create mode 100644 drivers/net/dpaa2/mc/fsl_net.h - create mode 100644 drivers/net/dpaa2/mc/mc_sys.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_private.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h - create mode 100644 drivers/net/dpaa2/qbman/include/compat.h - create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h - create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h - create mode 100644 drivers/net/dpaa2/rte_eth_dpaa2_pvt.h - create mode 100644 drivers/net/dpaa2/rte_eth_dpbp.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpio.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpni.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpni_annot.h - create mode 100644 drivers/net/dpaa2/rte_pmd_dpaa2_version.map - create mode 100644 lib/librte_eal/linuxapp/eal/eal_soc.c - create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c - create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h - create mode 100644 mk/machine/dpaa2/rte.vars.mk - -diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc -new file mode 100644 -index 0000000..fafbef4 ---- /dev/null -+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc -@@ -0,0 +1,61 @@ -+# BSD LICENSE -+# -+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+# -+# Redistribution and use in source and binary forms, with or without -+# modification, are permitted provided that the following conditions -+# are met: -+# -+# * Redistributions of source code must retain the above copyright -+# notice, this list of conditions and the following disclaimer. -+# * Redistributions in binary form must reproduce the above copyright -+# notice, this list of conditions and the following disclaimer in -+# the documentation and/or other materials provided with the -+# distribution. -+# * Neither the name of Freescale Semiconductor nor the names of its -+# contributors may be used to endorse or promote products derived -+# from this software without specific prior written permission. -+# -+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+# -+ -+#include "defconfig_arm64-armv8a-linuxapp-gcc" -+ -+# NXP (Freescale) - Soc Architecture with WRIOP and QBMAN support -+CONFIG_RTE_MACHINE="dpaa2" -+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd" -+ -+# -+# Compile Environment Abstraction Layer -+# -+CONFIG_RTE_MAX_LCORE=8 -+CONFIG_RTE_MAX_NUMA_NODES=1 -+ -+# Compile software PMD backed by FSL DPAA2 files -+# -+CONFIG_RTE_LIBRTE_DPAA2_PMD=y -+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n -+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n -+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n -+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n -+ -+CONFIG_RTE_LIBRTE_PMD_BOND=y -+CONFIG_RTE_CACHE_LINE_SIZE=128 -+CONFIG_RTE_EAL_IGB_UIO=n -+CONFIG_RTE_LIBRTE_KNI=n -+ -+#FSL DPAA2 caam driver -+CONFIG_RTE_LIBRTE_PMD_DPAA2_CAAM=n -+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_INIT=n -+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_DRIVER=n -+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_RX=n -diff --git a/drivers/net/Makefile b/drivers/net/Makefile -index 3386a67..ed10351 100644 ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -52,6 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2 - DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio - DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3 - DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt -+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2 - - ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y) - DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost -diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile -new file mode 100644 -index 0000000..3cf1782 ---- /dev/null -+++ b/drivers/net/dpaa2/Makefile -@@ -0,0 +1,102 @@ -+# BSD LICENSE -+# -+# Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+# -+# Redistribution and use in source and binary forms, with or without -+# modification, are permitted provided that the following conditions -+# are met: -+# -+# * Redistributions of source code must retain the above copyright -+# notice, this list of conditions and the following disclaimer. -+# * Redistributions in binary form must reproduce the above copyright -+# notice, this list of conditions and the following disclaimer in -+# the documentation and/or other materials provided with the -+# distribution. -+# * Neither the name of Freescale Semiconductor nor the names of its -+# contributors may be used to endorse or promote products derived -+# from this software without specific prior written permission. -+# -+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+include $(RTE_SDK)/mk/rte.vars.mk -+ -+# -+# library name -+# -+LIB = librte_pmd_dpaa2.a -+ -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -+CFLAGS += -O0 -g -+CFLAGS += "-Wno-error" -+else -+CFLAGS += -O3 -g -+CFLAGS += $(WERROR_FLAGS) -+endif -+CFLAGS +=-Wno-strict-aliasing -+CFLAGS +=-Wno-missing-prototypes -+CFLAGS +=-Wno-missing-declarations -+CFLAGS +=-Wno-unused-function -+ -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver/ -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include -+CFLAGS += -I$(RTE_SDK)/lib/librte_ether -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal -+ -+EXPORT_MAP := rte_pmd_dpaa2_version.map -+ -+LIBABIVER := 1 -+# -+# all source are stored in SRCS-y -+# -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \ -+ mc/dprc.c \ -+ mc/dprtc.o \ -+ mc/dpaiop.c \ -+ mc/dpdbg.o \ -+ mc/dpdcei.c \ -+ mc/dpdmai.c \ -+ mc/dpmac.c \ -+ mc/dpmcp.c \ -+ mc/dpbp.c \ -+ mc/dpio.c \ -+ mc/dpni.c \ -+ mc/dpsw.c \ -+ mc/dpci.c \ -+ mc/dpcon.c \ -+ mc/dpseci.c \ -+ mc/dpmng.c \ -+ mc/dpdmux.c \ -+ mc/mc_sys.c -+ -+# -+# all source are stored in SRCS-y -+# -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \ -+ qbman/driver/qbman_portal.c \ -+ qbman/driver/qbman_debug.c -+ -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpni.c -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpio.c -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpbp.c -+ -+# -+# Export include files -+# -+SYMLINK-y-include += -+ -+# this lib depends upon: -+DEPDIRS-y += lib/librte_eal -+include $(RTE_SDK)/mk/rte.lib.mk -diff --git a/drivers/net/dpaa2/dpaa2_logs.h b/drivers/net/dpaa2/dpaa2_logs.h -new file mode 100644 -index 0000000..319786a ---- /dev/null -+++ b/drivers/net/dpaa2/dpaa2_logs.h -@@ -0,0 +1,77 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+ -+#ifndef _DPAA2_LOGS_H_ -+#define _DPAA2_LOGS_H_ -+ -+#define PMD_INIT_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT -+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -+#else -+#define PMD_INIT_FUNC_TRACE() do { } while (0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX -+#define PMD_RX_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_RX_LOG(level, fmt, args...) do { } while(0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX -+#define PMD_TX_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_TX_LOG(level, fmt, args...) do { } while(0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE -+#define PMD_TX_FREE_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -+#define PMD_DRV_LOG_RAW(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -+#else -+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -+#endif -+ -+#define PMD_DRV_LOG(level, fmt, args...) \ -+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args) -+ -+#endif /* _DPAA2_LOGS_H_ */ -diff --git a/drivers/net/dpaa2/mc/dpaiop.c b/drivers/net/dpaa2/mc/dpaiop.c -new file mode 100644 -index 0000000..7c1ecff ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpaiop.c -@@ -0,0 +1,457 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpaiop_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpaiop_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPAIOP_CMD_OPEN(cmd, dpaiop_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpaiop_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpaiop_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPAIOP_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpaiop_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpaiop_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpaiop_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ(cmd, *type, 
irq_cfg); -+ -+ return 0; -+} -+ -+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpaiop_load(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct 
dpaiop_load_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_LOAD, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_LOAD(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_run(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpaiop_run_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RUN, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_RUN(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_sl_version *version) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_SL_VERSION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_SL_VERSION(cmd, version); -+ -+ return 0; -+} -+ -+int dpaiop_get_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_STATE(cmd, *state); -+ -+ return 0; -+} -+ -+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time_of_day) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_TIME_OF_DAY, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day); -+ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time_of_day) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_TIME_OF_DAY, -+ cmd_flags, -+ token); -+ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPAIOP_RSP_GET_TIME_OF_DAY(cmd, *time_of_day); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpbp.c b/drivers/net/dpaa2/mc/dpbp.c -new file mode 100644 -index 0000000..87899b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpbp.c -@@ -0,0 +1,432 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
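/*
 * Editorial sketch, not part of this patch: every wrapper in dpaiop.c above
 * follows the same flow (build an MC command header, optionally encode
 * parameters with a DPAIOP_CMD_* macro, send it via mc_send_command(), then
 * decode the response), and the same token-based pattern repeats in the
 * dpbp/dpci/dpcon/dpdbg files that follow.  A minimal caller might look like
 * the function below; it assumes fsl_dpaiop.h is included, and the mc_io
 * handle, the object id and the zero cmd_flags are placeholders.
 */
static int dpaiop_query_example(struct fsl_mc_io *mc_io, int dpaiop_id)
{
	struct dpaiop_attr attr;
	uint16_t token;
	int err;

	/* open the object; the MC returns an authentication token */
	err = dpaiop_open(mc_io, 0, dpaiop_id, &token);
	if (err)
		return err;

	/* every subsequent command carries that token */
	err = dpaiop_get_attributes(mc_io, 0, token, &attr);

	/* always release the token, even if the query failed */
	dpaiop_close(mc_io, 0, token);
	return err;
}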
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPBP_CMD_OPEN(cmd, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t 
irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpci.c b/drivers/net/dpaa2/mc/dpci.c -new file mode 100644 -index 0000000..2ec02a1 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpci.c -@@ -0,0 +1,501 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
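/*
 * Editorial sketch, not part of this patch: the per-object interrupt helpers
 * in dpbp.c above are intended to be used together: enable an IRQ line, read
 * the pending status word, then write the same bits back to acknowledge them.
 * irq_index 0 and cmd_flags 0 are placeholder values, the token is assumed to
 * come from an earlier dpbp_open(), and fsl_dpbp.h is assumed included.
 */
static int dpbp_irq_poll_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	int err;

	/* unmask/enable interrupt line 0 on this DPBP */
	err = dpbp_set_irq_enable(mc_io, 0, token, 0, 1);
	if (err)
		return err;

	/* read the pending interrupt status bits */
	err = dpbp_get_irq_status(mc_io, 0, token, 0, &status);
	if (err)
		return err;

	/* acknowledge (clear) whatever was pending */
	if (status)
		err = dpbp_clear_irq_status(mc_io, 0, token, 0, status);

	return err;
}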
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpci_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCI_CMD_OPEN(cmd, dpci_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpci_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; 
-+} -+ -+int dpci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command 
to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_peer_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_PEER_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *up) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_LINK_STATE(cmd, *up); -+ -+ return 0; -+} -+ -+int dpci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpci_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_RX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_TX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_TX_QUEUE(cmd, attr); -+ 
-+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpcon.c b/drivers/net/dpaa2/mc/dpcon.c -new file mode 100644 -index 0000000..396303d ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpcon.c -@@ -0,0 +1,401 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
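/*
 * Editorial sketch, not part of this patch: dpci.c above adds a link-state
 * query on top of the usual open/enable sequence.  cmd_flags 0 and dpci_id
 * are placeholders; fsl_dpci.h is assumed to be included.
 */
static int dpci_link_up_example(struct fsl_mc_io *mc_io, int dpci_id)
{
	uint16_t token;
	int up = 0;
	int err;

	err = dpci_open(mc_io, 0, dpci_id, &token);
	if (err)
		return err;

	/* the object must be enabled before the link can come up */
	err = dpci_enable(mc_io, 0, token);
	if (!err)
		err = dpci_get_link_state(mc_io, 0, token, &up);

	dpci_close(mc_io, 0, token);
	if (err)
		return err;
	return up; /* nonzero when the link is reported up */
}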
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCON_CMD_OPEN(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCON_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t 
cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr) -+{ -+ struct mc_command cmd = 
{ 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -diff --git a/drivers/net/dpaa2/mc/dpdbg.c b/drivers/net/dpaa2/mc/dpdbg.c -new file mode 100644 -index 0000000..6f2a08d ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdbg.c -@@ -0,0 +1,547 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpdbg_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdbg_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDBG_CMD_OPEN(cmd, dpdbg_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpdbg_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint8_t sender_id, -+ uint32_t *fqid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, *fqid); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCON_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPCON_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpbp_id, -+ struct dpdbg_dpbp_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response 
parameters */ -+ DPDBG_RSP_GET_DPBP_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpci_id, -+ uint8_t priority, -+ uint32_t *fqid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPCI_FQID(cmd, *fqid); -+ -+ return 0; -+} -+ -+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule, -+ uint8_t *rule_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)rule_buf; -+ -+ if (!rule_buf || !dpkg_rule) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, dpkg_rule->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (dpkg_rule->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < dpkg_rule->num_extracts; i++) { -+ switch (dpkg_rule->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ dpkg_rule->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ dpkg_rule->extracts[i].extract.from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ dpkg_rule->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_data.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, dpkg_rule->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, dpkg_rule->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, -+ dpkg_rule->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ dpkg_rule->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t marking, -+ struct dpdbg_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_marking_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ uint8_t marking) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ uint8_t marking) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ struct dpdbg_dpio_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg) -+{ -+ struct 
mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPCON_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpseci_id, -+ struct dpdbg_dpseci_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPSECI_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpmac_id, -+ enum dpmac_counter counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPMAC_COUNTER, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPMAC_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ enum dpni_counter counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_COUNTER, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpni_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdcei.c b/drivers/net/dpaa2/mc/dpdcei.c -new file mode 100644 -index 0000000..a5c4c47 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdcei.c -@@ -0,0 +1,449 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpdcei_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdcei_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDCEI_CMD_OPEN(cmd, dpdcei_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdcei_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdcei_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDCEI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdcei_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_disable(struct 
fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdcei_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdcei_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdcei_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdcei_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ 
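Each of the dpdcei wrappers above follows the same management-complex (MC) command flow used throughout this patch: encode a command header with mc_encode_cmd_header(), fill in any command parameters, send it with mc_send_command(), and unpack the response fields. A minimal caller-side sketch of that flow is shown below, using only functions whose prototypes appear in this patch (dpdcei_open, dpdcei_get_irq_mask, the dpdcei_set_irq_mask that follows, and dpdcei_close). It assumes the struct fsl_mc_io portal has already been set up by platform code and passes 0 as a placeholder for cmd_flags; both are illustrative assumptions, not part of the patch.

/*
 * Hedged usage sketch. The dpdcei prototypes and struct fsl_mc_io come
 * from the driver headers added elsewhere in this patch (their names are
 * elided in this copy of the diff).
 */
#include <stdint.h>

int dpdcei_irq_mask_example(struct fsl_mc_io *mc_io, int dpdcei_id)
{
	uint16_t token;
	uint32_t mask;
	int err;

	/* open the DPDCEI object to obtain an authentication token */
	err = dpdcei_open(mc_io, 0 /* cmd_flags placeholder */, dpdcei_id, &token);
	if (err)
		return err;

	/* read the IRQ mask for interrupt index 0, then write it back
	 * with one extra event bit set (bit 0, chosen arbitrarily here) */
	err = dpdcei_get_irq_mask(mc_io, 0, token, 0, &mask);
	if (!err)
		err = dpdcei_set_irq_mask(mc_io, 0, token, 0, mask | 0x1);

	/* always release the token */
	dpdcei_close(mc_io, 0, token);

	return err;
}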
-+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpdcei_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdmai.c b/drivers/net/dpaa2/mc/dpdmai.c -new file mode 100644 -index 0000000..154d2c6 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdmai.c -@@ -0,0 +1,452 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpdmai_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmai_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDMAI_CMD_OPEN(cmd, dpdmai_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmai_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmai_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMAI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ 
-+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct 
dpdmai_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, struct dpdmai_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c -new file mode 100644 -index 0000000..dc07608 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdmux.c -@@ -0,0 +1,567 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_OPEN(cmd, dpdmux_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status); -+ 
-+ return 0; -+} -+ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpio.c b/drivers/net/dpaa2/mc/dpio.c -new file mode 100644 -index 0000000..f511e29 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpio.c -@@ -0,0 +1,468 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPIO_CMD_OPEN(cmd, dpio_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPIO_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); -+ -+ return 0; -+} -+ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); -+ -+ return 0; -+} -+ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/dpmac.c b/drivers/net/dpaa2/mc/dpmac.c -new file mode 100644 -index 0000000..f31d949 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpmac.c -@@ -0,0 +1,422 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_OPEN(cmd, dpmac_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ 
struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, 
-+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_READ(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_MDIO_READ(cmd, cfg->data); -+ -+ return 0; -+} -+ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_WRITE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_LINK_CFG(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_COUNTER(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpmcp.c b/drivers/net/dpaa2/mc/dpmcp.c -new file mode 100644 -index 0000000..dfd84b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpmcp.c -@@ -0,0 +1,312 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMCP_CMD_OPEN(cmd, dpmcp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMCP_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ 
uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpmng.c b/drivers/net/dpaa2/mc/dpmng.c -new file mode 100644 -index 
0000000..cac5ba5
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpmng.c
-@@ -0,0 +1,58 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include
-+#include
-+#include
-+#include
-+
-+int mc_get_version(struct fsl_mc_io *mc_io,
-+		   uint32_t cmd_flags,
-+		   struct mc_version *mc_ver_info)
-+{
-+	struct mc_command cmd = { 0 };
-+	int err;
-+
-+	/* prepare command */
-+	cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
-+					  cmd_flags,
-+					  0);
-+
-+	/* send command to mc*/
-+	err = mc_send_command(mc_io, &cmd);
-+	if (err)
-+		return err;
-+
-+	/* retrieve response parameters */
-+	DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
-+
-+	return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
-new file mode 100644
-index 0000000..cdd2f37
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpni.c
-@@ -0,0 +1,1907 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)key_cfg_buf; -+ -+ if (!key_cfg_buf || !cfg) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, cfg->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < cfg->num_extracts; i++) { -+ switch (cfg->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ cfg->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ cfg->extracts[i].extract. -+ from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_data.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, cfg->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, cfg->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ cfg->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; -+ -+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPNI_CMD_OPEN(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPNI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_POOLS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_ATTR(cmd, attr); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, -+ cmd_flags, -+ token); -+ -+ /* 
send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_QDID(cmd, *qdid); -+ -+ return 0; -+} -+ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_SP_INFO(cmd, sp_info); -+ -+ return 0; -+} -+ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); -+ -+ return 0; -+} -+ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_COUNTER(cmd, counter); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_COUNTER(cmd, *value); -+ -+ return 0; -+} -+ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_COUNTER(cmd, counter, value); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_LINK_CFG(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t 
max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); -+ -+ return 0; -+} -+ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MTU(cmd, mtu); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MTU(cmd, *mtu); -+ -+ return 0; -+} -+ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command 
cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ return 0; -+} -+ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct 
dpni_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); -+ -+ return 0; -+} -+ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ 
-+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPR(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPF(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); -+ -+ return 0; -+} -+ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; -+ -+ DPNI_EXT_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dprc.c b/drivers/net/dpaa2/mc/dprc.c -new file mode 100644 -index 0000000..75c6a68 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dprc.c -@@ -0,0 +1,786 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dprc_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_CONTAINER_ID(cmd, *container_id); -+ -+ return 0; -+} -+ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, -+ 0); -+ DPRC_CMD_OPEN(cmd, container_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_paddr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ DPRC_CMD_CREATE_CONTAINER(cmd, cfg); -+ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_CREATE_CONTAINER(cmd, *child_container_id, -+ *child_portal_paddr); -+ -+ return 0; -+} -+ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, -+ cmd_flags, -+ token); -+ DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, -+ cmd_flags, -+ token); -+ DPRC_CMD_RESET_CONTAINER(cmd, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ 
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command 
to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_QUOTA(cmd, *quota); -+ -+ return 0; -+} -+ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, -+ cmd_flags, -+ token); -+ DPRC_CMD_ASSIGN(cmd, container_id, res_req); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, -+ cmd_flags, -+ token); -+ DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_POOL_COUNT(cmd, *pool_count); -+ -+ return 0; -+} -+ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_POOL(cmd, pool_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_POOL(cmd, type); -+ -+ return 0; -+} -+ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count) -+{ -+ struct mc_command cmd = { 0 
}; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count); -+ -+ return 0; -+} -+ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ(cmd, obj_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ(cmd, obj_desc); -+ -+ return 0; -+} -+ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc); -+ -+ return 0; -+} -+ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_OBJ_IRQ(cmd, -+ obj_type, -+ obj_id, -+ irq_index, -+ irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ *res_count = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_COUNT(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_COUNT(cmd, *res_count); -+ -+ return 0; -+} -+ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_IDS(cmd, range_desc, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) 
-+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_IDS(cmd, range_desc); -+ -+ return 0; -+} -+ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_REGION(cmd, region_desc); -+ -+ return 0; -+} -+ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, -+ cmd_flags, -+ token); -+ DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, -+ cmd_flags, -+ token); -+ DPRC_CMD_DISCONNECT(cmd, endpoint); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_CONNECTION(cmd, endpoint1); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_CONNECTION(cmd, endpoint2, *state); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c -new file mode 100644 -index 0000000..73667af ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dprtc.c -@@ -0,0 +1,509 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dprtc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dprtc_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPRTC_CMD_OPEN(cmd, dprtc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dprtc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dprtc_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dprtc_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ 
return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dprtc_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprtc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprtc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = 
mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dprtc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprtc_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int64_t offset) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t freq_compensation) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *freq_compensation) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, *freq_compensation); -+ -+ return 0; -+} -+ -+int dprtc_get_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_TIME(cmd, *time); -+ -+ return 0; -+} -+ -+int dprtc_set_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint64_t time) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_TIME(cmd, time); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_alarm(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, uint64_t time) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_ALARM(cmd, time); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/dpseci.c b/drivers/net/dpaa2/mc/dpseci.c -new file mode 100644 -index 0000000..a4b932a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpseci.c -@@ -0,0 +1,502 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpseci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpseci_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPSECI_CMD_OPEN(cmd, dpseci_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpseci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpseci_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPSECI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpseci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpseci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpseci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; 
-+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpseci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpseci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct 
dpseci_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ const struct dpseci_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_RX_QUEUE(cmd, queue); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_TX_QUEUE(cmd, queue); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_SEC_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_counters *counters) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpsw.c b/drivers/net/dpaa2/mc/dpsw.c -new file mode 100644 -index 0000000..2034b55 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpsw.c -@@ -0,0 +1,1639 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include
-+#include
-+#include
-+#include
-+
-+/* internal functions */
-+static void build_if_id_bitmap(const uint16_t *if_id,
-+ const uint16_t num_ifs,
-+ struct mc_command *cmd,
-+ int start_param)
-+{
-+ int i;
-+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
-+ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc(
-+ (if_id[i] % 64), 1, 1);
-+}
-+
-+static int read_if_id_bitmap(uint16_t *if_id,
-+ uint16_t *num_ifs,
-+ struct mc_command *cmd,
-+ int start_param)
-+{
-+ int bitmap[DPSW_MAX_IF] = { 0 };
-+ int i, j = 0;
-+ int count = 0;
-+
-+ for (i = 0; i < DPSW_MAX_IF; i++) {
-+ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64],
-+ i % 64, 1);
-+ count += bitmap[i];
-+ }
-+
-+ *num_ifs = (uint16_t)count;
-+
-+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
-+ if (bitmap[i]) {
-+ if_id[j] = (uint16_t)i;
-+ j++;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* DPSW APIs */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpsw_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPSW_CMD_OPEN(cmd, dpsw_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPSW_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ 
DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint16_t if_id, -+ struct dpsw_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_TCI(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_TCI(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io 
*mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING, -+ cmd_flags, -+ token); -+ 
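Every dpsw_*() wrapper in this file follows the same three steps: zero a struct mc_command, encode the command ID, flags and token into cmd.header with mc_encode_cmd_header(), pack any parameters with a DPSW_CMD_* macro, then hand the command to mc_send_command() and, for query calls, unpack the reply with a DPSW_RSP_* macro. The sketch below shows how a caller strings these wrappers together; it is illustrative only and assumes a token already obtained from dpsw_open() (defined earlier in this file) and that fsl_dpsw.h is the public header declaring these prototypes.

#include <stdint.h>
#include <fsl_dpsw.h>   /* assumed public header declaring these wrappers */

/* Illustrative only: enable one switch interface and read a counter for it.
 * 'token' is assumed to come from dpsw_open(); the caller chooses which
 * enum dpsw_counter value to read. */
static int dpsw_example_read_counter(struct fsl_mc_io *mc_io, uint16_t token,
                                     uint16_t if_id, enum dpsw_counter type,
                                     uint64_t *value)
{
        int err;

        err = dpsw_if_enable(mc_io, 0 /* cmd_flags */, token, if_id);
        if (err)
                return err;

        return dpsw_if_get_counter(mc_io, 0, token, if_id, type, value);
}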
DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPSW_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ENABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_DISABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int 
err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length); -+ -+ return 0; -+} -+ -+int dpsw_vlan_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = 
mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_fdb_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *fdb_id, -+ const struct dpsw_fdb_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_ADD(cmd, *fdb_id); -+ -+ return 0; -+} -+ -+int 
dpsw_fdb_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint16_t fdb_id, -+ enum dpsw_fdb_learning_mode mode) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_acl_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *acl_id, -+ const struct dpsw_acl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_ADD(cmd, *acl_id); -+ -+ return 0; -+} -+ -+int dpsw_acl_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE(cmd, acl_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, -+ uint8_t *entry_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)entry_cfg_buf; -+ -+ DPSW_PREP_ACL_ENTRY(ext_params, key); -+} -+ -+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ struct dpsw_acl_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_ctrl_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_ctrl_if_pools_cfg *pools) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+/** -+* @brief Function disables control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop.h b/drivers/net/dpaa2/mc/fsl_dpaiop.h -new file mode 100644 -index 0000000..b039b2a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpaiop.h -@@ -0,0 +1,494 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAIOP_H -+#define __FSL_DPAIOP_H -+ -+struct fsl_mc_io; -+ -+/* Data Path AIOP API -+ * Contains initialization APIs and runtime control APIs for DPAIOP -+ */ -+ -+/** -+ * dpaiop_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpaiop_id: DPAIOP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpaiop_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpaiop_id, -+ uint16_t *token); -+ -+/** -+ * dpaiop_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_close(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * struct dpaiop_cfg - Structure representing DPAIOP configuration -+ * @aiop_id: AIOP ID -+ * @aiop_container_id: AIOP container ID -+ */ -+struct dpaiop_cfg { -+ int aiop_id; -+ int aiop_container_id; -+}; -+ -+/** -+ * dpaiop_create() - Create the DPAIOP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPAIOP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpaiop_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpaiop_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpaiop_destroy() - Destroy the DPAIOP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpaiop_destroy(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * dpaiop_reset() - Reset the DPAIOP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_reset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * struct dpaiop_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpaiop_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpaiop_set_irq() - Set IRQ information for the DPAIOP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpaiop_irq_cfg *irq_cfg); -+ -+/** -+ * dpaiop_get_irq() - Get IRQ information from the DPAIOP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpaiop_irq_cfg *irq_cfg); -+ -+/** -+ * dpaiop_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpaiop_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpaiop_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpaiop_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpaiop_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpaiop_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
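The interrupt model described above is the same for every cause: the mask selects which of the up to 32 causes may assert the IRQ, the enable bit gates the interrupt as a whole, and the status word is write-1-to-clear. A minimal sketch of that flow, assuming a token from dpaiop_open() and cmd_flags of 0:

#include <stdint.h>
#include <fsl_dpaiop.h>

/* Illustrative only: arm the first two causes on IRQ index 0, then (e.g. from
 * the interrupt handler) read whatever is pending and acknowledge it (W1C). */
static int dpaiop_example_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;
        int err;

        err = dpaiop_set_irq_mask(mc_io, 0, token, 0, 0x3);
        if (!err)
                err = dpaiop_set_irq_enable(mc_io, 0, token, 0, 1);
        if (err)
                return err;

        /* ... later, when the interrupt fires ... */
        err = dpaiop_get_irq_status(mc_io, 0, token, 0, &status);
        if (!err && status)
                err = dpaiop_clear_irq_status(mc_io, 0, token, 0, status);
        return err;
}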
-+ */ -+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpaiop_attr - Structure representing DPAIOP attributes -+ * @id: AIOP ID -+ * @version: DPAIOP version -+ */ -+struct dpaiop_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPAIOP version -+ * @major: DPAIOP major version -+ * @minor: DPAIOP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpaiop_get_attributes - Retrieve DPAIOP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_attr *attr); -+ -+/** -+ * struct dpaiop_load_cfg - AIOP load configuration -+ * @options: AIOP load options -+ * @img_iova: I/O virtual address of AIOP ELF image -+ * @img_size: Size of AIOP ELF image in memory (in bytes) -+ */ -+struct dpaiop_load_cfg { -+ uint64_t options; -+ uint64_t img_iova; -+ uint32_t img_size; -+}; -+ -+/** -+ * dpaiop_load_aiop() - Loads an image to AIOP -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @cfg: AIOP load configurations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_load(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_load_cfg *cfg); -+ -+#define DPAIOP_RUN_OPT_DEBUG 0x0000000000000001ULL -+ -+/** -+ * struct dpaiop_run_cfg - AIOP run configuration -+ * @cores_mask: Mask of AIOP cores to run (core 0 in most significant bit) -+ * @options: Execution options (currently none defined) -+ * @args_iova: I/O virtual address of AIOP arguments -+ * @args_size: Size of AIOP arguments in memory (in bytes) -+ */ -+struct dpaiop_run_cfg { -+ uint64_t cores_mask; -+ uint64_t options; -+ uint64_t args_iova; -+ uint32_t args_size; -+}; -+ -+/** -+ * dpaiop_run_aiop() - Starts AIOP execution -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @cfg: AIOP run configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_run(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpaiop_run_cfg *cfg); -+ -+/** -+ * struct dpaiop_sl_version - AIOP SL (Service Layer) version -+ * @major: AIOP SL major version number -+ * @minor: AIOP SL minor version number -+ * @revision: AIOP SL revision number -+ */ -+struct dpaiop_sl_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * dpaiop_get_sl_version() - Get AIOP SL (Service Layer) version -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @version: AIOP SL version number -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_sl_version *version); -+ -+/** -+ * AIOP states -+ * -+ * AIOP internal states, can be retrieved by calling dpaiop_get_state() routine -+ */ -+ -+/** -+ * AIOP reset successfully completed. 
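The load/run pair and the state constants below combine into the usual AIOP boot sequence: load an ELF image from DMA-able memory, start a set of cores, then poll dpaiop_get_state() until the cores report RUNNING. A hedged sketch, assuming the caller has already mapped the image and holds a token from dpaiop_open():

#include <stdint.h>
#include <fsl_dpaiop.h>

/* Illustrative only: load an AIOP image and start core 0 (most significant
 * bit of cores_mask, per the struct dpaiop_run_cfg description), then wait
 * for the RUNNING state.  img_iova/img_size describe an image the caller has
 * already placed in DMA-able memory. */
static int dpaiop_example_boot(struct fsl_mc_io *mc_io, uint16_t token,
                               uint64_t img_iova, uint32_t img_size)
{
        struct dpaiop_load_cfg load_cfg = {
                .options = 0,
                .img_iova = img_iova,
                .img_size = img_size,
        };
        struct dpaiop_run_cfg run_cfg = {
                .cores_mask = 0x8000000000000000ULL,    /* core 0 */
                .options = 0,
                .args_iova = 0,
                .args_size = 0,
        };
        uint32_t state = 0;
        int err;

        err = dpaiop_load(mc_io, 0, token, &load_cfg);
        if (!err)
                err = dpaiop_run(mc_io, 0, token, &run_cfg);
        if (err)
                return err;

        do {
                err = dpaiop_get_state(mc_io, 0, token, &state);
                if (err || (state & DPAIOP_STATE_BOOT_ERROR))
                        return err ? err : -1;  /* sketch: generic failure */
        } while (!(state & DPAIOP_STATE_RUNNING));

        return 0;
}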
-+ */ -+#define DPAIOP_STATE_RESET_DONE 0x00000000 -+/** -+ * AIOP reset is ongoing. -+ */ -+#define DPAIOP_STATE_RESET_ONGOING 0x00000001 -+ -+/** -+ * AIOP image loading successfully completed. -+ */ -+#define DPAIOP_STATE_LOAD_DONE 0x00000002 -+/** -+ * AIOP image loading is ongoing. -+ */ -+#define DPAIOP_STATE_LOAD_ONGIONG 0x00000004 -+/** -+ * AIOP image loading completed with error. -+ */ -+#define DPAIOP_STATE_LOAD_ERROR 0x00000008 -+ -+/** -+ * Boot process of AIOP cores is ongoing. -+ */ -+#define DPAIOP_STATE_BOOT_ONGOING 0x00000010 -+/** -+ * Boot process of AIOP cores completed with an error. -+ */ -+#define DPAIOP_STATE_BOOT_ERROR 0x00000020 -+/** -+ * AIOP cores are functional and running -+ */ -+#define DPAIOP_STATE_RUNNING 0x00000040 -+/** @} */ -+ -+/** -+ * dpaiop_get_state() - Get AIOP state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @state: AIOP state -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *state); -+ -+/** -+ * dpaiop_set_time_of_day() - Set AIOP internal time-of-day -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @time_of_day: Current number of milliseconds since the Epoch -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time_of_day); -+ -+/** -+ * dpaiop_get_time_of_day() - Get AIOP internal time-of-day -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @time_of_day: Current number of milliseconds since the Epoch -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time_of_day); -+ -+#endif /* __FSL_DPAIOP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h -new file mode 100644 -index 0000000..5b77bb8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h -@@ -0,0 +1,190 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPAIOP_CMD_H -+#define _FSL_DPAIOP_CMD_H -+ -+/* DPAIOP Version */ -+#define DPAIOP_VER_MAJOR 1 -+#define DPAIOP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPAIOP_CMDID_CLOSE 0x800 -+#define DPAIOP_CMDID_OPEN 0x80a -+#define DPAIOP_CMDID_CREATE 0x90a -+#define DPAIOP_CMDID_DESTROY 0x900 -+ -+#define DPAIOP_CMDID_GET_ATTR 0x004 -+#define DPAIOP_CMDID_RESET 0x005 -+ -+#define DPAIOP_CMDID_SET_IRQ 0x010 -+#define DPAIOP_CMDID_GET_IRQ 0x011 -+#define DPAIOP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPAIOP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPAIOP_CMDID_SET_IRQ_MASK 0x014 -+#define DPAIOP_CMDID_GET_IRQ_MASK 0x015 -+#define DPAIOP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPAIOP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPAIOP_CMDID_LOAD 0x280 -+#define DPAIOP_CMDID_RUN 0x281 -+#define DPAIOP_CMDID_GET_SL_VERSION 0x282 -+#define DPAIOP_CMDID_GET_STATE 0x283 -+#define DPAIOP_CMDID_SET_TIME_OF_DAY 0x284 -+#define DPAIOP_CMDID_GET_TIME_OF_DAY 0x285 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_OPEN(cmd, dpaiop_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpaiop_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->aiop_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->aiop_container_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_LOAD(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->img_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->img_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_RUN(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->args_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->cores_mask); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->args_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_SL_VERSION(cmd, version) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, version->major);\ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, version->minor);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, version->revision);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_STATE(cmd, state) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time_of_day) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_TIME_OF_DAY(cmd, time_of_day) \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time_of_day) -+ -+#endif /* _FSL_DPAIOP_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpbp.h b/drivers/net/dpaa2/mc/fsl_dpbp.h -new file mode 100644 -index 0000000..9856bb8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpbp.h -@@ -0,0 +1,438 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
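The DPAIOP_CMD_*/DPAIOP_RSP_* macros above are all built from the column layout noted in their header comments: MC_CMD_OP(cmd, param, offset, width, type, arg_name) stores a value into bits [offset, offset+width) of 64-bit command parameter word 'param', and MC_RSP_OP extracts the same field from a response. A conceptual sketch of that packing, assuming the usual struct mc_command layout with an array of 64-bit parameter words (defined in fsl_mc_cmd.h, which is not part of this hunk):

#include <stdint.h>

/* Sketch of what one MC_CMD_OP(cmd, param, offset, width, type, arg) line
 * does: place 'arg' in bits [offset, offset+width) of parameter word 'param'.
 * The params[] layout is an assumption taken from fsl_mc_cmd.h. */
static inline void mc_cmd_op_sketch(uint64_t *params, int param,
                                    int offset, int width, uint64_t arg)
{
        uint64_t field_mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        params[param] |= (arg & field_mask) << offset;
}

/* e.g. DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) corresponds to:
 *      mc_cmd_op_sketch(cmd.params, 0,  0, 32, mask);
 *      mc_cmd_op_sketch(cmd.params, 0, 32,  8, irq_index);
 */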
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPBP_H -+#define __FSL_DPBP_H -+ -+/* Data Path Buffer Pool API -+ * Contains initialization APIs and runtime control APIs for DPBP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpbp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpbp_id: DPBP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpbp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token); -+ -+/** -+ * dpbp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_cfg - Structure representing DPBP configuration -+ * @options: place holder -+ */ -+struct dpbp_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dpbp_create() - Create the DPBP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPBP object, allocate required resources and -+ * perform required initialization. 
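The open/close pair above establishes the authentication token that every other DPBP call takes. A typical caller opens an existing object, enables it, and reads its attributes to learn the hardware buffer-pool ID used for acquire/release operations; dpbp_enable() and dpbp_get_attributes() are declared further down in this header, and 'dpbp_id' here stands for an object ID coming from the DPL or from object discovery. Illustrative sketch only:

#include <stdint.h>
#include <fsl_dpbp.h>

/* Illustrative only: open a DPBP, enable it and fetch its bpid. */
static int dpbp_example_get_bpid(struct fsl_mc_io *mc_io, int dpbp_id,
                                 uint16_t *bpid)
{
        struct dpbp_attr attr;
        uint16_t token;
        int err;

        err = dpbp_open(mc_io, 0 /* cmd_flags */, dpbp_id, &token);
        if (err)
                return err;

        err = dpbp_enable(mc_io, 0, token);
        if (!err)
                err = dpbp_get_attributes(mc_io, 0, token, &attr);
        if (!err)
                *bpid = attr.bpid;

        dpbp_close(mc_io, 0, token);
        return err;
}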
-+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpbp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpbp_destroy() - Destroy the DPBP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_enable() - Enable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_disable() - Disable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_is_enabled() - Check if the DPBP is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpbp_reset() - Reset the DPBP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpbp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_get_irq() - Get IRQ information from the DPBP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpbp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpbp_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpbp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpbp_get_irq_status() - Get the current status of any pending interrupts. 
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpbp_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpbp_attr - Structure representing DPBP attributes -+ * @id: DPBP object ID -+ * @version: DPBP version -+ * @bpid: Hardware buffer pool ID; should be used as an argument in -+ * acquire/release operations on buffers -+ */ -+struct dpbp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPBP version -+ * @major: DPBP major version -+ * @minor: DPBP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t bpid; -+}; -+ -+/** -+ * dpbp_get_attributes - Retrieve DPBP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr); -+ -+/** -+ * DPBP notifications options -+ */ -+ -+/** -+ * BPSCN write will attempt to allocate into a cache (coherent write) -+ */ -+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 -+ -+/** -+ * struct dpbp_notification_cfg - Structure representing DPBP notifications -+ * towards software -+ * @depletion_entry: below this threshold the pool is "depleted"; -+ * set it to '0' to disable it -+ * @depletion_exit: greater than or equal to this threshold the pool exit its -+ * "depleted" state -+ * @surplus_entry: above this threshold the pool is in "surplus" state; -+ * set it to '0' to disable it -+ * @surplus_exit: less than or equal to this threshold the pool exit its -+ * "surplus" state -+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' -+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned. 
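The status word returned by dpbp_get_irq_status() is write-1-to-clear through dpbp_clear_irq_status(), so a poll-and-acknowledge helper might look like the sketch below. Illustrative only, not part of the patch; IRQ index 0 and cmd_flags 0 are assumptions, and 'token' comes from a prior dpbp_open().

#include <stdint.h>
#include "fsl_dpbp.h"

/* Illustrative only -- not part of this patch. Read the pending-cause
 * bitmask and acknowledge every set bit (W1C semantics). */
static int dpbp_ack_pending_irqs(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	uint8_t irq_index = 0;	/* index 0 assumed for illustration */
	int err;

	err = dpbp_get_irq_status(mc_io, 0, token, irq_index, &status);
	if (err)
		return err;

	if (status)	/* one bit per cause; 1 = pending */
		err = dpbp_clear_irq_status(mc_io, 0, token, irq_index, status);

	return err;
}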
-+ * @message_ctx: The context that will be part of the BPSCN message and will -+ * be written to 'message_iova' -+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values -+ */ -+struct dpbp_notification_cfg { -+ uint32_t depletion_entry; -+ uint32_t depletion_exit; -+ uint32_t surplus_entry; -+ uint32_t surplus_exit; -+ uint64_t message_iova; -+ uint64_t message_ctx; -+ uint16_t options; -+}; -+ -+/** -+ * dpbp_set_notifications() - Set notifications towards software -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+/** -+ * dpbp_get_notifications() - Get the notifications configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+#endif /* __FSL_DPBP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h -new file mode 100644 -index 0000000..71ad96a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h -@@ -0,0 +1,172 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
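The depletion/surplus thresholds are easiest to see in a concrete configuration, sketched below. Illustrative only, not part of the patch: the threshold values, the context value and the 16-byte-aligned DMA address are placeholders, and cmd_flags is again left at 0.

#include <stdint.h>
#include "fsl_dpbp.h"

/* Illustrative only -- not part of this patch. Enable depletion
 * notifications and leave surplus notifications disabled. */
static int dpbp_setup_depletion_notif(struct fsl_mc_io *mc_io, uint16_t token,
				      uint64_t msg_iova)
{
	struct dpbp_notification_cfg cfg = {0};

	cfg.depletion_entry = 32;	/* pool is "depleted" below 32 buffers */
	cfg.depletion_exit  = 64;	/* leaves "depleted" state at >= 64 */
	cfg.surplus_entry   = 0;	/* 0 disables surplus notifications */
	cfg.surplus_exit    = 0;
	cfg.message_iova    = msg_iova;	/* required because depletion_entry != 0 */
	cfg.message_ctx     = 0x1234;	/* placeholder context in each BPSCN */
	cfg.options         = DPBP_NOTIF_OPT_COHERENT_WRITE;

	return dpbp_set_notifications(mc_io, 0, token, &cfg);
}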
-+ */ -+#ifndef _FSL_DPBP_CMD_H -+#define _FSL_DPBP_CMD_H -+ -+/* DPBP Version */ -+#define DPBP_VER_MAJOR 2 -+#define DPBP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPBP_CMDID_CLOSE 0x800 -+#define DPBP_CMDID_OPEN 0x804 -+#define DPBP_CMDID_CREATE 0x904 -+#define DPBP_CMDID_DESTROY 0x900 -+ -+#define DPBP_CMDID_ENABLE 0x002 -+#define DPBP_CMDID_DISABLE 0x003 -+#define DPBP_CMDID_GET_ATTR 0x004 -+#define DPBP_CMDID_RESET 0x005 -+#define DPBP_CMDID_IS_ENABLED 0x006 -+ -+#define DPBP_CMDID_SET_IRQ 0x010 -+#define DPBP_CMDID_GET_IRQ 0x011 -+#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPBP_CMDID_SET_IRQ_MASK 0x014 -+#define DPBP_CMDID_GET_IRQ_MASK 0x015 -+#define DPBP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_OPEN(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->bpid); \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\ -+} while (0) -+#endif /* _FSL_DPBP_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpci.h b/drivers/net/dpaa2/mc/fsl_dpci.h -new file mode 100644 -index 0000000..d885935 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpci.h -@@ -0,0 +1,594 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
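The DPBP_CMD_*/DPBP_RSP_* macros above only describe the wire layout of each command: MC_CMD_OP(cmd, param, offset, width, type, arg) packs 'arg' into bits [offset, offset+width) of 64-bit parameter word 'param', and MC_RSP_OP extracts fields from a response the same way. A rough open-coded equivalent of DPBP_CMD_SET_IRQ_MASK is sketched below; the function name and the raw params[] array are for illustration only, the real packing helpers are presumed to live in the MC command header shipped alongside these files.

#include <stdint.h>

/* Illustrative only -- not part of this patch. Hand-expanded version of
 * DPBP_CMD_SET_IRQ_MASK, assuming the target fields start out zeroed. */
static inline void example_pack_set_irq_mask(uint64_t params[],
					     uint8_t irq_index, uint32_t mask)
{
	/* MC_CMD_OP(cmd, 0,  0, 32, uint32_t, mask)      */
	params[0] |= (uint64_t)mask;
	/* MC_CMD_OP(cmd, 0, 32,  8, uint8_t,  irq_index) */
	params[0] |= (uint64_t)irq_index << 32;
}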
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPCI_H -+#define __FSL_DPCI_H -+ -+/* Data Path Communication Interface API -+ * Contains initialization APIs and runtime control APIs for DPCI -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCI macros */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPCI object -+ */ -+#define DPCI_PRIO_NUM 2 -+ -+/** -+ * Indicates an invalid frame queue -+ */ -+#define DPCI_FQID_NOT_VALID (uint32_t)(-1) -+ -+/** -+ * All queues considered; see dpci_set_rx_queue() -+ */ -+#define DPCI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpci_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpci_id: DPCI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpci_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpci_id, -+ uint16_t *token); -+ -+/** -+ * dpci_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpci_cfg - Structure representing DPCI configuration -+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI; -+ * note, that the number of transmit priorities (queues) -+ * is determined by the number of receive priorities of -+ * the peer DPCI object -+ */ -+struct dpci_cfg { -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_create() - Create the DPCI object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCI object, allocate required resources and perform required -+ * initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
For objects that are created using the -+ * DPL file, call dpci_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpci_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpci_destroy() - Destroy the DPCI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_is_enabled() - Check if the DPCI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpci_reset() - Reset the DPCI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** DPCI IRQ Index and Events */ -+ -+/** -+ * IRQ index -+ */ -+#define DPCI_IRQ_INDEX 0 -+ -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPCI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+/** -+ * IRQ event - indicates a connection event -+ */ -+#define DPCI_IRQ_EVENT_CONNECTED 0x00000002 -+/** -+ * IRQ event - indicates a disconnection event -+ */ -+#define DPCI_IRQ_EVENT_DISCONNECTED 0x00000004 -+ -+/** -+ * struct dpci_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpci_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpci_set_irq() - Set IRQ information for the DPCI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpci_irq_cfg *irq_cfg); -+ -+/** -+ * dpci_get_irq() - Get IRQ information from the DPCI. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpci_irq_cfg *irq_cfg); -+ -+/** -+ * dpci_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpci_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpci_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpci_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpci_get_irq_status() - Get the current status of any pending interrupts. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpci_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpci_attr - Structure representing DPCI attributes -+ * @id: DPCI object ID -+ * @version: DPCI version -+ * @num_of_priorities: Number of receive priorities -+ */ -+struct dpci_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPCI attributes -+ * @major: DPCI major version -+ * @minor: DPCI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_get_attributes() - Retrieve DPCI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_attr *attr); -+ -+/** -+ * struct dpci_peer_attr - Structure representing the peer DPCI attributes -+ * @peer_id: DPCI peer id; if no peer is connected returns (-1) -+ * @num_of_priorities: The pper's number of receive priorities; determines the -+ * number of transmit priorities for the local DPCI object -+ */ -+struct dpci_peer_attr { -+ int peer_id; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @attr: Returned peer attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_peer_attr *attr); -+ -+/** -+ * dpci_get_link_state() - Retrieve the DPCI link state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @up: Returned link state; returns '1' if link is up, '0' otherwise -+ * -+ * DPCI can be connected to another DPCI, together they -+ * create a 'link'. In order to use the DPCI Tx and Rx queues, -+ * both objects must be enabled. -+ * -+ * Return: '0' on Success; Error code otherwise. 
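Because a DPCI is only usable once it is connected to a peer and both ends are enabled, callers typically verify the peer and link state before touching the queues. The sketch below is illustrative only, not part of the patch; cmd_flags 0 and the -1 error return are placeholders, and 'token' comes from a prior dpci_open().

#include "fsl_dpci.h"

/* Illustrative only -- not part of this patch. Confirm a peer DPCI is
 * connected and the link is up before using the Tx/Rx queues. */
static int dpci_check_link(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpci_peer_attr peer;
	int up = 0;
	int err;

	err = dpci_get_peer_attributes(mc_io, 0, token, &peer);
	if (err)
		return err;
	if (peer.peer_id == -1)		/* no peer DPCI connected */
		return -1;

	err = dpci_get_link_state(mc_io, 0, token, &up);
	if (err)
		return err;

	return up ? 0 : -1;		/* up only when both objects are enabled */
}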
-+ */ -+int dpci_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *up); -+ -+/** -+ * enum dpci_dest - DPCI destination types -+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is -+ * expected to dequeue from the queue based on polling or -+ * other user-defined method -+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected -+ * to dequeue from the queue only after notification is -+ * received -+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified -+ * DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpci_dest { -+ DPCI_DEST_NONE = 0, -+ DPCI_DEST_DPIO = 1, -+ DPCI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid -+ * values are 0-1 or 0-7, depending on the number of priorities -+ * in that channel; not relevant for 'DPCI_DEST_NONE' option -+ */ -+struct dpci_dest_cfg { -+ enum dpci_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** DPCI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPCI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPCI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in -+ * 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpci_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpci_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpci_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPCI creation; use -+ * DPCI_ALL_QUEUES to configure all Rx queues -+ * identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpci_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpci_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpci_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpci_get_rx_queue() - Retrieve Rx queue attributes. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPCI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_rx_queue_attr *attr); -+ -+/** -+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI; -+ * returns 'DPCI_FQID_NOT_VALID' if a no peer is connected or if -+ * the selected priority exceeds the number of priorities of the -+ * peer DPCI object -+ */ -+struct dpci_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpci_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities of the peer DPCI object -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPCI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h -new file mode 100644 -index 0000000..f45e435 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h -@@ -0,0 +1,200 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
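A common use of dpci_set_rx_queue() is to point every Rx queue at a DPIO notification channel, as sketched below. Illustrative only, not part of the patch: 'dpio_id', the user-context value and the priority are placeholders, and cmd_flags is left at 0.

#include <stdint.h>
#include "fsl_dpci.h"

/* Illustrative only -- not part of this patch. Configure all DPCI Rx
 * queues identically to raise FQDAN notifications on one DPIO. */
static int dpci_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token, int dpio_id)
{
	struct dpci_rx_queue_cfg cfg = {0};

	cfg.options = DPCI_QUEUE_OPT_USER_CTX | DPCI_QUEUE_OPT_DEST;
	cfg.user_ctx = 0x1234;			 /* returned in each dequeued frame descriptor */
	cfg.dest_cfg.dest_type = DPCI_DEST_DPIO; /* schedule mode, FQDAN to a DPIO */
	cfg.dest_cfg.dest_id = dpio_id;
	cfg.dest_cfg.priority = 0;

	/* DPCI_ALL_QUEUES applies the same setting to every Rx priority */
	return dpci_set_rx_queue(mc_io, 0, token, DPCI_ALL_QUEUES, &cfg);
}

The virtual FQIDs actually used for dequeue and enqueue are then read back with dpci_get_rx_queue() and dpci_get_tx_queue().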
-+ */ -+#ifndef _FSL_DPCI_CMD_H -+#define _FSL_DPCI_CMD_H -+ -+/* DPCI Version */ -+#define DPCI_VER_MAJOR 2 -+#define DPCI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCI_CMDID_CLOSE 0x800 -+#define DPCI_CMDID_OPEN 0x807 -+#define DPCI_CMDID_CREATE 0x907 -+#define DPCI_CMDID_DESTROY 0x900 -+ -+#define DPCI_CMDID_ENABLE 0x002 -+#define DPCI_CMDID_DISABLE 0x003 -+#define DPCI_CMDID_GET_ATTR 0x004 -+#define DPCI_CMDID_RESET 0x005 -+#define DPCI_CMDID_IS_ENABLED 0x006 -+ -+#define DPCI_CMDID_SET_IRQ 0x010 -+#define DPCI_CMDID_GET_IRQ 0x011 -+#define DPCI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCI_CMDID_SET_IRQ_MASK 0x014 -+#define DPCI_CMDID_GET_IRQ_MASK 0x015 -+#define DPCI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCI_CMDID_SET_RX_QUEUE 0x0e0 -+#define DPCI_CMDID_GET_LINK_STATE 0x0e1 -+#define DPCI_CMDID_GET_PEER_ATTR 0x0e2 -+#define DPCI_CMDID_GET_RX_QUEUE 0x0e3 -+#define DPCI_CMDID_GET_TX_QUEUE 0x0e4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_OPEN(cmd, dpci_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, 
arg_name */ -+#define DPCI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_of_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_LINK_STATE(cmd, up) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, up) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPCI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpcon.h b/drivers/net/dpaa2/mc/fsl_dpcon.h -new file mode 100644 -index 0000000..2555be5 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpcon.h -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPCON_H -+#define __FSL_DPCON_H -+ -+/* Data Path Concentrator API -+ * Contains initialization APIs and runtime control APIs for DPCON -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCON macros */ -+ -+/** -+ * Use it to disable notifications; see dpcon_set_notification() -+ */ -+#define DPCON_INVALID_DPIO_ID (int)(-1) -+ -+/** -+ * dpcon_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpcon_id: DPCON unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpcon_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token); -+ -+/** -+ * dpcon_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_cfg - Structure representing DPCON configuration -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_cfg { -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_create() - Create the DPCON object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCON object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpcon_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpcon_destroy() - Destroy the DPCON object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_enable() - Enable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_disable() - Disable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_is_enabled() - Check if the DPCON is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpcon_reset() - Reset the DPCON, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpcon_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_get_irq() - Get IRQ information from the DPCON. 
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpcon_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpcon_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpcon_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpcon_get_irq_status() - Get the current status of any pending interrupts. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpcon_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpcon_attr - Structure representing DPCON attributes -+ * @id: DPCON object ID -+ * @version: DPCON version -+ * @qbman_ch_id: Channel ID to be used by dequeue operation -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_attr { -+ int id; -+ /** -+ * struct version - DPCON version -+ * @major: DPCON major version -+ * @minor: DPCON minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t qbman_ch_id; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_get_attributes() - Retrieve DPCON attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr); -+ -+/** -+ * struct dpcon_notification_cfg - Structure representing notification parameters -+ * @dpio_id: DPIO object ID; must be configured with a notification channel; -+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; -+ * @priority: Priority selection within the DPIO channel; valid values -+ * are 0-7, depending on the number of priorities in that channel -+ * @user_ctx: User context value provided with each CDAN message -+ */ -+struct dpcon_notification_cfg { -+ int dpio_id; -+ uint8_t priority; -+ uint64_t user_ctx; -+}; -+ -+/** -+ * dpcon_set_notification() - Set DPCON notification destination -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @cfg: Notification parameters -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg); -+ -+#endif /* __FSL_DPCON_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h -new file mode 100644 -index 0000000..ecb40d0 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h -@@ -0,0 +1,162 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPCON_CMD_H -+#define _FSL_DPCON_CMD_H -+ -+/* DPCON Version */ -+#define DPCON_VER_MAJOR 2 -+#define DPCON_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCON_CMDID_CLOSE 0x800 -+#define DPCON_CMDID_OPEN 0x808 -+#define DPCON_CMDID_CREATE 0x908 -+#define DPCON_CMDID_DESTROY 0x900 -+ -+#define DPCON_CMDID_ENABLE 0x002 -+#define DPCON_CMDID_DISABLE 0x003 -+#define DPCON_CMDID_GET_ATTR 0x004 -+#define DPCON_CMDID_RESET 0x005 -+#define DPCON_CMDID_IS_ENABLED 0x006 -+ -+#define DPCON_CMDID_SET_IRQ 0x010 -+#define DPCON_CMDID_GET_IRQ 0x011 -+#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCON_CMDID_SET_IRQ_MASK 0x014 -+#define DPCON_CMDID_GET_IRQ_MASK 0x015 -+#define DPCON_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCON_CMDID_SET_NOTIFICATION 0x100 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_OPEN(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, 
offset, width, type, arg_name */ -+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+} while (0) -+ -+#endif /* _FSL_DPCON_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg.h b/drivers/net/dpaa2/mc/fsl_dpdbg.h -new file mode 100644 -index 0000000..ead22e8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdbg.h -@@ -0,0 +1,635 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDBG_H -+#define __FSL_DPDBG_H -+ -+#include -+#include -+#include -+ -+/* Data Path Debug API -+ * Contains initialization APIs and runtime control APIs for DPDBG -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpdbg_open() - Open a control session for the specified object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdbg_id: DPDBG unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdbg_id, -+ uint16_t *token); -+ -+/** -+ * dpdbg_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdbg_attr - Structure representing DPDBG attributes -+ * @id: DPDBG object ID -+ * @version: DPDBG version -+ */ -+struct dpdbg_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPDBG version -+ * @major: DPDBG major version -+ * @minor: DPDBG minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpdbg_get_attributes - Retrieve DPDBG attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_attr *attr); -+ -+/** -+ * struct dpdbg_dpni_info - Info of DPNI -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @qdid: Virtual QDID. -+ * @err_fqid: Virtual FQID for error queues -+ * @tx_conf_fqid: Virtual FQID for global TX confirmation queue -+ */ -+struct dpdbg_dpni_info { -+ uint8_t max_senders; -+ uint32_t qdid; -+ uint32_t err_fqid; -+ uint32_t tx_conf_fqid; -+}; -+ -+/** -+ * dpdbg_get_dpni_info() - Retrieve info for a specific DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @info: The returned info -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_info *info); -+ -+/** -+ * dpdbg_get_dpni_private_fqid() - Retrieve the virtual TX confirmation queue -+ * FQID of the required DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: The requested sender ID -+ * @fqid: The returned virtual private TX confirmation FQID. -+ * -+ * Return: '0' on Success; Error code otherwise. 
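
The open/query/close pattern above repeats for every MC object in these headers. A minimal sketch of it for DPDBG, chaining dpdbg_open(), dpdbg_get_attributes(), dpdbg_get_dpni_info() and dpdbg_close(): it assumes an MC portal (mc_io) initialized elsewhere, passes 0 for cmd_flags (no MC_CMD_FLAG_ options), keeps error handling to a minimum, and the include paths are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <fsl_dpdbg.h>

/* Illustrative only: mc_io is an MC portal set up elsewhere;
 * dpdbg_id/dpni_id come from the DPL layout. */
static int dump_dpni_debug_info(struct fsl_mc_io *mc_io, int dpdbg_id, int dpni_id)
{
    struct dpdbg_attr attr;
    struct dpdbg_dpni_info info;
    uint16_t token;
    int err;

    err = dpdbg_open(mc_io, 0, dpdbg_id, &token);
    if (err)
        return err;

    err = dpdbg_get_attributes(mc_io, 0, token, &attr);
    if (!err)
        printf("DPDBG %d, version %d.%d\n", attr.id,
               attr.version.major, attr.version.minor);

    err = dpdbg_get_dpni_info(mc_io, 0, token, dpni_id, &info);
    if (!err)
        printf("DPNI %d: qdid=%u err_fqid=%u tx_conf_fqid=%u\n",
               dpni_id, (unsigned)info.qdid, (unsigned)info.err_fqid,
               (unsigned)info.tx_conf_fqid);

    /* The token is only valid for this control session. */
    return dpdbg_close(mc_io, 0, token);
}
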
-+ */ -+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint8_t sender_id, -+ uint32_t *fqid); -+ -+/** -+ * struct dpdbg_dpcon_info - Info of DPCON -+ * @ch_id: Channel ID -+ */ -+struct dpdbg_dpcon_info { -+ uint32_t ch_id; -+}; -+ -+/** -+ * dpdbg_get_dpcon_info() - Retrieve info of DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpcon_id: The requested DPCON ID -+ * @info: The returned info. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_info *info); -+ -+/** -+ * struct dpdbg_dpbp_info - Info of DPBP -+ * @bpid: Virtual buffer pool ID -+ */ -+struct dpdbg_dpbp_info { -+ uint32_t bpid; -+}; -+ -+/** -+ * dpdbg_get_dpbp_info() - Retrieve info of DPBP -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpbp_id: The requested DPBP ID -+ * @info: The returned info. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpbp_id, -+ struct dpdbg_dpbp_info *info); -+ -+/** -+ * dpdbg_get_dpci_fqid() - Retrieve the virtual FQID of the required DPCI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpci_id: The requested DPCI ID -+ * @priority: Select the queue relative to number of priorities configured at -+ * DPCI creation -+ * @fqid: The returned virtual FQID. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpci_id, -+ uint8_t priority, -+ uint32_t *fqid); -+ -+/** -+ * Maximum size for rule match (in bytes) -+ */ -+#define DPDBG_MAX_RULE_SIZE 56 -+/** -+ * Disable marking -+ */ -+#define DPDBG_DISABLE_MARKING 0xFF -+ -+/** -+ * dpdbg_prepare_ctlu_global_rule() - function prepare extract parameters -+ * @dpkg_rule: defining a full Key Generation profile (rule) -+ * @rule_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpdbg_set_global_marking() -+ */ -+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule, -+ uint8_t *rule_buf); -+ -+/** -+ * struct dpdbg_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @rule_iova: I/O virtual address of the rule (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpdbg_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint64_t rule_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpdbg_set_ctlu_global_marking() - Set marking for all match rule frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @marking: The requested Debug marking -+ * @cfg: Marking rule to add -+ * -+ * Warning: must be called after dpdbg_prepare_global_rule() -+ * -+ * Return: '0' on Success; Error code otherwise. 
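
Using dpdbg_set_ctlu_global_marking() takes a few steps the comments only imply: build a dpkg extraction profile, render it with dpdbg_prepare_ctlu_global_rule() into a zeroed 256-byte DMA-able buffer, then hand the key/mask/rule IOVAs plus the key size down in a dpdbg_rule_cfg. A rough sketch under those assumptions; dma_alloc_zeroed() and virt_to_iova() are hypothetical placeholders for the platform's DMA-mapping facility, and the dpkg profile is assumed to be filled in by the caller.

#include <stdint.h>
#include <stddef.h>
#include <fsl_dpdbg.h>

/* Hypothetical helpers standing in for the platform's DMA allocator. */
extern void *dma_alloc_zeroed(size_t len);
extern uint64_t virt_to_iova(void *vaddr);

static int mark_matching_frames(struct fsl_mc_io *mc_io, uint16_t dpdbg_token,
                                struct dpkg_profile_cfg *dpkg_rule,
                                void *key, void *mask, uint8_t key_size,
                                uint8_t marking)
{
    struct dpdbg_rule_cfg cfg;
    uint8_t *rule_buf;
    int err;

    /* The API asks for a zeroed 256-byte buffer in DMA-able memory. */
    rule_buf = dma_alloc_zeroed(256);
    if (!rule_buf)
        return -1;

    err = dpdbg_prepare_ctlu_global_rule(dpkg_rule, rule_buf);
    if (err)
        return err;

    cfg.key_size  = key_size;            /* at most DPDBG_MAX_RULE_SIZE */
    cfg.key_iova  = virt_to_iova(key);
    cfg.mask_iova = virt_to_iova(mask);
    cfg.rule_iova = virt_to_iova(rule_buf);

    return dpdbg_set_ctlu_global_marking(mc_io, 0, dpdbg_token, marking, &cfg);
}
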
-+ */ -+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t marking, -+ struct dpdbg_rule_cfg *cfg); -+ -+/** -+ * All traffic classes considered -+ */ -+#define DPDBG_DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered -+ */ -+#define DPDBG_DPNI_ALL_TC_FLOWS (uint8_t)(-1) -+/** -+ * All buffer pools considered -+ */ -+#define DPDBG_DPNI_ALL_DPBP (uint8_t)(-1) -+ -+/** -+ * struct dpdbg_dpni_rx_marking_cfg - Ingress frame configuration -+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes. -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id; -+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS'; -+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP -+ * @marking: Marking for match frames; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_rx_marking_cfg { -+ uint8_t tc_id; -+ uint16_t flow_id; -+ uint16_t dpbp_id; -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_rx_marking() - Set Rx frame marking for DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @cfg: RX frame marking configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_marking_cfg *cfg); -+ -+/* selects global confirmation queues */ -+#define DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE (uint16_t)(-1) -+ -+/** -+ * dpdbg_set_dpni_tx_conf_marking() - Set Tx frame marking for DPNI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: Sender Id for the confirmation queue; -+ * 'DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE' for global confirmation queue -+ * @marking: The requested marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ uint8_t marking); -+ -+/** -+ * dpdbg_set_dpio_marking() - Set debug frame marking on enqueue -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpio_id: The requested DPIO ID -+ * @marking: The requested marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * Return: '0' on Success; Error code otherwise. 
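
With the wildcard constants above, marking everything a DPNI receives is a three-field setup. A small sketch, again assuming an open DPDBG session and no special command flags:

#include <stdint.h>
#include <fsl_dpdbg.h>

/* Mark every ingress frame of dpni_id with 'marking', across all
 * traffic classes, flows and buffer pools; pass DPDBG_DISABLE_MARKING
 * to turn marking back off. */
static int mark_all_dpni_rx(struct fsl_mc_io *mc_io, uint16_t dpdbg_token,
                            int dpni_id, uint8_t marking)
{
    struct dpdbg_dpni_rx_marking_cfg cfg = {
        .tc_id   = DPDBG_DPNI_ALL_TCS,
        .flow_id = DPDBG_DPNI_ALL_TC_FLOWS,
        .dpbp_id = DPDBG_DPNI_ALL_DPBP,
        .marking = marking,
    };

    return dpdbg_set_dpni_rx_marking(mc_io, 0, dpdbg_token, dpni_id, &cfg);
}
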
-+ */ -+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ uint8_t marking); -+ -+/** -+ * enum dpdbg_verbosity_level - Trace verbosity level -+ * @DPDBG_VERBOSITY_LEVEL_DISABLE: Trace disabled -+ * @DPDBG_VERBOSITY_LEVEL_TERSE: Terse trace -+ * @DPDBG_VERBOSITY_LEVEL_VERBOSE: Verbose trace -+ */ -+enum dpdbg_verbosity_level { -+ DPDBG_VERBOSITY_LEVEL_DISABLE = 0, -+ DPDBG_VERBOSITY_LEVEL_TERSE, -+ DPDBG_VERBOSITY_LEVEL_VERBOSE -+}; -+ -+/** -+ * dpdbg_set_ctlu_global_trace() - Set global trace configuration for CTLU trace -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @cfg: trace rule to add -+ * -+ * Warning: must be called after dpdbg_prepare_global_rule() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_rule_cfg *cfg); -+ -+/** -+ * Number of DPIO trace points -+ */ -+#define DPDBG_NUM_OF_DPIO_TRACE_POINTS 2 -+ -+/** -+ * enum dpdbg_dpio_trace_type - Define Trace point type -+ * @DPDBG_DPIO_TRACE_TYPE_ENQUEUE: This trace point triggers when an enqueue -+ * command, received via this portal, -+ * and containing a marked frame, is executed -+ * @DPDBG_DPIO_TRACE_TYPE_DEFERRED: This trace point triggers when the deferred -+ * enqueue of a marked frame received via this -+ * portal completes -+ */ -+enum dpdbg_dpio_trace_type { -+ DPDBG_DPIO_TRACE_TYPE_ENQUEUE = 0, -+ DPDBG_DPIO_TRACE_TYPE_DEFERRED = 1 -+}; -+ -+/** -+ * struct dpdbg_dpio_trace_cfg - Configure the behavior of a trace point -+ * when a frame marked with the specified DD code point is -+ * encountered -+ * @marking: this field will be written into the DD field of every FD -+ * enqueued in this DPIO. -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ * @enqueue_type: Enqueue trace point type defining a full Key Generation -+ * profile (rule) -+ */ -+struct dpdbg_dpio_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+ enum dpdbg_dpio_trace_type enqueue_type; -+}; -+ -+/** -+ * dpdbg_set_dpio_trace() - Set trace for DPIO for every enqueued frame to -+ * the portal -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpio_id: The requested DPIO ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ struct dpdbg_dpio_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]); -+ -+/** -+ * struct dpdbg_dpni_trace_cfg - Configure the behavior of a trace point when a -+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes. -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id; -+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS'; -+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP -+ * @marking: Marking for match frames; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_rx_trace_cfg { -+ uint8_t tc_id; -+ uint16_t flow_id; -+ uint16_t dpbp_id; -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_rx_trace() - Set trace for DPNI ingress (WRIOP ingress). 
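
dpdbg_set_dpio_trace() takes a fixed array of DPDBG_NUM_OF_DPIO_TRACE_POINTS entries, one per trace point, so both elements have to be filled in even when only one is of interest. A sketch under the usual assumptions (open DPDBG session, cmd_flags of 0):

#include <stdint.h>
#include <fsl_dpdbg.h>

/* Enable verbose tracing of ordinary enqueues and terse tracing of
 * deferred enqueues for frames marked with 'marking' on one DPIO. */
static int trace_dpio_enqueues(struct fsl_mc_io *mc_io, uint16_t dpdbg_token,
                               int dpio_id, uint8_t marking)
{
    struct dpdbg_dpio_trace_cfg tp[DPDBG_NUM_OF_DPIO_TRACE_POINTS] = {
        [0] = {
            .marking      = marking,
            .verbosity    = DPDBG_VERBOSITY_LEVEL_VERBOSE,
            .enqueue_type = DPDBG_DPIO_TRACE_TYPE_ENQUEUE,
        },
        [1] = {
            .marking      = marking,
            .verbosity    = DPDBG_VERBOSITY_LEVEL_TERSE,
            .enqueue_type = DPDBG_DPIO_TRACE_TYPE_DEFERRED,
        },
    };

    return dpdbg_set_dpio_trace(mc_io, 0, dpdbg_token, dpio_id, tp);
}
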
-+ * in case of multiple requests for different DPNIs - the trace -+ * will be for the latest DPNI requested. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @trace_cfg: Trace configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg); -+ -+/** -+ * All DPNI senders -+ */ -+#define DPDBG_DPNI_ALL_SENDERS (uint16_t)(-1) -+ -+/** -+ * struct dpdbg_dpni_trace_cfg - Configure the behavior of a trace point when a -+ * frame marked with the specified DD code point is encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_tx_trace_cfg { -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_tx_trace() - Set trace for DPNI dequeued frames -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: Sender ID; 'DPDBG_DPNI_ALL_SENDERS' for all senders -+ * @trace_cfg: Trace configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg); -+ -+/** -+ * Number of DPCON trace points -+ */ -+#define DPDBG_NUM_OF_DPCON_TRACE_POINTS 2 -+ -+/** -+ * struct dpdbg_dpcon_trace_cfg - Configure the behavior of a trace point when a -+ * frame marked with the specified DD code point is encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ */ -+struct dpdbg_dpcon_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+}; -+ -+/** -+ * dpdbg_set_dpcon_trace() - Set trace for DPCON when a frame marked with a -+ * specified marking is dequeued from a WQ in the -+ * channel selected -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpcon_id: The requested DPCON ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]); -+ -+/** -+ * Number of DPSECI trace points -+ */ -+#define DPDBG_NUM_OF_DPSECI_TRACE_POINTS 2 -+ -+/** -+ * struct dpdbg_dpseci_trace_cfg - Configure the behavior of a trace point when -+ * a frame marked with the specified DD code point is -+ * encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ */ -+struct dpdbg_dpseci_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+}; -+ -+/** -+ * dpdbg_set_dpseci_trace() - Set trace for DPSECI when a frame marked with the -+ * specific marking is enqueued via this portal. 
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpseci_id: The requested DPSECI ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpseci_id, -+ struct dpdbg_dpseci_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]); -+ -+/** -+ * dpdbg_get_dpmac_counter() - DPMAC packet throughput -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpmac_id: The requested DPMAC ID -+ * @counter_type: The requested DPMAC counter -+ * @counter: Returned counter value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpmac_id, -+ enum dpmac_counter counter_type, -+ uint64_t *counter); -+ -+/** -+ * dpdbg_get_dpni_counter() - DPNI packet throughput -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @counter_type: The requested DPNI counter -+ * @counter: Returned counter value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ enum dpni_counter counter_type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPDBG_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h -new file mode 100644 -index 0000000..b672788 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h -@@ -0,0 +1,249 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDBG_CMD_H -+#define _FSL_DPDBG_CMD_H -+ -+/* DPDBG Version */ -+#define DPDBG_VER_MAJOR 1 -+#define DPDBG_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPDBG_CMDID_CLOSE 0x800 -+#define DPDBG_CMDID_OPEN 0x80F -+ -+#define DPDBG_CMDID_GET_ATTR 0x004 -+ -+#define DPDBG_CMDID_GET_DPNI_INFO 0x130 -+#define DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID 0x131 -+#define DPDBG_CMDID_GET_DPCON_INFO 0x132 -+#define DPDBG_CMDID_GET_DPBP_INFO 0x133 -+#define DPDBG_CMDID_GET_DPCI_FQID 0x134 -+ -+#define DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING 0x135 -+#define DPDBG_CMDID_SET_DPNI_RX_MARKING 0x136 -+#define DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING 0x137 -+#define DPDBG_CMDID_SET_DPIO_MARKING 0x138 -+ -+#define DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE 0x140 -+#define DPDBG_CMDID_SET_DPIO_TRACE 0x141 -+#define DPDBG_CMDID_SET_DPNI_RX_TRACE 0x142 -+#define DPDBG_CMDID_SET_DPNI_TX_TRACE 0x143 -+#define DPDBG_CMDID_SET_DPCON_TRACE 0x145 -+#define DPDBG_CMDID_SET_DPSECI_TRACE 0x146 -+ -+#define DPDBG_CMDID_GET_DPMAC_COUNTER 0x150 -+#define DPDBG_CMDID_GET_DPNI_COUNTER 0x151 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_OPEN(cmd, dpdbg_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdbg_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_INFO(cmd, info) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, info->qdid);\ -+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, info->max_senders);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, info->err_fqid);\ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, info->tx_conf_fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, sender_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, fqid) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPCON_INFO(cmd, info) \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->ch_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPBP_INFO(cmd, info) \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->bpid) -+ -+/* 
cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, priority);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPCI_FQID(cmd, fqid) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, marking);\ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->tc_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->flow_id);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->dpbp_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->marking);\ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 4, 4, enum dpdbg_dpio_trace_type, \ -+ trace_point[0].enqueue_type); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 36, 4, enum dpdbg_dpio_trace_type, \ -+ trace_point[1].enqueue_type); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, trace_cfg->tc_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, trace_cfg->flow_id);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, trace_cfg->dpbp_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ 
trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id);\ -+ MC_CMD_OP(cmd, 0, 32, 16, enum dpmac_counter, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPMAC_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 16, enum dpni_counter, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPDBG_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei.h b/drivers/net/dpaa2/mc/fsl_dpdcei.h -new file mode 100644 -index 0000000..319795c ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdcei.h -@@ -0,0 +1,515 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDCEI_H -+#define __FSL_DPDCEI_H -+ -+/* Data Path DCE Interface API -+ * Contains initialization APIs and runtime control APIs for DPDCEI -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPDCEI macros */ -+ -+/** -+ * Indicates an invalid frame queue -+ */ -+#define DPDCEI_FQID_NOT_VALID (uint32_t)(-1) -+ -+/** -+ * enum dpdcei_engine - DCE engine block -+ * @DPDCEI_ENGINE_COMPRESSION: Engine compression -+ * @DPDCEI_ENGINE_DECOMPRESSION: Engine decompression -+ */ -+enum dpdcei_engine { -+ DPDCEI_ENGINE_COMPRESSION, -+ DPDCEI_ENGINE_DECOMPRESSION -+}; -+ -+/** -+ * dpdcei_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @dpdcei_id: DPDCEI unique ID -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdcei_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdcei_id, -+ uint16_t *token); -+ -+/** -+ * dpdcei_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdcei_cfg - Structure representing DPDCEI configuration -+ * @engine: compression or decompression engine to be selected -+ * @priority: Priority for the DCE hardware processing (valid values 1-8). -+ */ -+struct dpdcei_cfg { -+ enum dpdcei_engine engine; -+ uint8_t priority; -+}; -+ -+/** -+ * dpdcei_create() - Create the DPDCEI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @cfg: configuration parameters -+ * -+ * Create the DPDCEI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
For objects that are created using the -+ * DPL file, call dpdcei_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdcei_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdcei_destroy() - Destroy the DPDCEI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdcei_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_enable() - Enable the DPDCEI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_disable() - Disable the DPDCEI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_is_enabled() - Check if the DPDCEI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @en: Return '1' for object enabled/'0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdcei_reset() - Reset the DPDCEI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdcei_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdcei_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdcei_set_irq() - Set IRQ information for the DPDCEI to trigger an interrupt -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
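
The create/enable pair documented above is enough to bring up a DCE interface that was not declared in the DPL. A sketch assuming an initialized MC portal and no special command flags; a real caller would keep the token for later dpdcei_disable()/dpdcei_destroy():

#include <stdint.h>
#include <fsl_dpdcei.h>

/* Create a compression DPDCEI at hardware priority 1, enable it and
 * sanity-check that the MC reports it as enabled. */
static int bring_up_compression(struct fsl_mc_io *mc_io, uint16_t *token)
{
    struct dpdcei_cfg cfg = {
        .engine   = DPDCEI_ENGINE_COMPRESSION,
        .priority = 1,              /* valid range is 1-8 */
    };
    int en = 0;
    int err;

    err = dpdcei_create(mc_io, 0, &cfg, token);
    if (err)
        return err;

    err = dpdcei_enable(mc_io, 0, *token);
    if (err)
        return err;

    err = dpdcei_is_enabled(mc_io, 0, *token, &en);
    if (err)
        return err;

    return en ? 0 : -1;
}
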
-+ */ -+int dpdcei_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdcei_irq_cfg *irq_cfg); -+ -+/** -+ * dpdcei_get_irq() - Get IRQ information from the DPDCEI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdcei_irq_cfg *irq_cfg); -+ -+/** -+ * dpdcei_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdcei_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdcei_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdcei_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. 
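
Putting the DPDCEI interrupt calls together: the message-interrupt address/value pair is programmed first with dpdcei_set_irq(), the causes of interest are unmasked, and only then is the interrupt enabled as a whole. A sketch in which msi_addr/msi_data are caller-supplied values (how they are obtained is platform-specific and outside this API), and the all-ones mask is simply an illustrative choice:

#include <stdint.h>
#include <fsl_dpdcei.h>

/* Arm message interrupt index 0 of an open DPDCEI: program the MSI
 * write (address/value), unmask every cause, then enable the IRQ. */
static int arm_dpdcei_irq(struct fsl_mc_io *mc_io, uint16_t token,
                          uint64_t msi_addr, uint32_t msi_data, int irq_num)
{
    const uint8_t irq_index = 0;
    struct dpdcei_irq_cfg irq_cfg = {
        .addr    = msi_addr,
        .val     = msi_data,
        .irq_num = irq_num,
    };
    int err;

    err = dpdcei_set_irq(mc_io, 0, token, irq_index, &irq_cfg);
    if (err)
        return err;

    /* One bit per cause; all ones considers every cause for assertion. */
    err = dpdcei_set_irq_mask(mc_io, 0, token, irq_index, 0xffffffff);
    if (err)
        return err;

    return dpdcei_set_irq_enable(mc_io, 0, token, irq_index, 1);
}
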
-+ */ -+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdcei_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdcei_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+/** -+ * struct dpdcei_attr - Structure representing DPDCEI attributes -+ * @id: DPDCEI object ID -+ * @engine: DCE engine block -+ * @version: DPDCEI version -+ */ -+struct dpdcei_attr { -+ int id; -+ enum dpdcei_engine engine; -+ /** -+ * struct version - DPDCEI version -+ * @major: DPDCEI major version -+ * @minor: DPDCEI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpdcei_get_attributes() - Retrieve DPDCEI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdcei_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_attr *attr); -+ -+/** -+ * enum dpdcei_dest - DPDCEI destination types -+ * @DPDCEI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; -+ * user is expected to dequeue from the queue based on -+ * polling or other user-defined method -+ * @DPDCEI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to -+ * dequeue from the queue only after notification is -+ * received -+ * @DPDCEI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified -+ * DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpdcei_dest { -+ DPDCEI_DEST_NONE = 0, -+ DPDCEI_DEST_DPIO = 1, -+ DPDCEI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpdcei_dest_cfg - Structure representing DPDCEI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPDCEI_DEST_NONE' option -+ */ -+struct dpdcei_dest_cfg { -+ enum dpdcei_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** DPDCEI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPDCEI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPDCEI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpdcei_rx_queue_cfg - RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPDCEI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPDCEI_QUEUE_OPT_USER_CTX' is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPDCEI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpdcei_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpdcei_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpdcei_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpdcei_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpdcei_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpdcei_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpdcei_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdcei_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
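
The options bitmask decides which fields of dpdcei_rx_queue_cfg the MC actually reads, so both flags are set below even though user_ctx and dest_cfg travel in one call. A sketch that points the Rx queue at a DPIO and reads back the virtual FQID to dequeue from; dpio_id and priority are assumed to describe an existing DPIO channel:

#include <stdint.h>
#include <fsl_dpdcei.h>

/* Send FQDAN notifications for this DPDCEI's Rx queue to a DPIO and
 * tag dequeued frames with 'user_ctx'; return the FQID to poll. */
static int attach_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
                             int dpio_id, uint8_t priority,
                             uint64_t user_ctx, uint32_t *fqid)
{
    struct dpdcei_rx_queue_cfg cfg = {
        .options  = DPDCEI_QUEUE_OPT_USER_CTX | DPDCEI_QUEUE_OPT_DEST,
        .user_ctx = user_ctx,
        .dest_cfg = {
            .dest_type = DPDCEI_DEST_DPIO,
            .dest_id   = dpio_id,
            .priority  = priority,
        },
    };
    struct dpdcei_rx_queue_attr attr;
    int err;

    err = dpdcei_set_rx_queue(mc_io, 0, token, &cfg);
    if (err)
        return err;

    err = dpdcei_get_rx_queue(mc_io, 0, token, &attr);
    if (err)
        return err;

    *fqid = attr.fqid;
    return 0;
}
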
-+ */ -+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_rx_queue_attr *attr); -+ -+/** -+ * struct dpdcei_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to DCE hardware -+ */ -+struct dpdcei_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdcei_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPDCEI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h -new file mode 100644 -index 0000000..8452d88 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h -@@ -0,0 +1,182 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPDCEI_CMD_H -+#define _FSL_DPDCEI_CMD_H -+ -+/* DPDCEI Version */ -+#define DPDCEI_VER_MAJOR 1 -+#define DPDCEI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPDCEI_CMDID_CLOSE 0x800 -+#define DPDCEI_CMDID_OPEN 0x80D -+#define DPDCEI_CMDID_CREATE 0x90D -+#define DPDCEI_CMDID_DESTROY 0x900 -+ -+#define DPDCEI_CMDID_ENABLE 0x002 -+#define DPDCEI_CMDID_DISABLE 0x003 -+#define DPDCEI_CMDID_GET_ATTR 0x004 -+#define DPDCEI_CMDID_RESET 0x005 -+#define DPDCEI_CMDID_IS_ENABLED 0x006 -+ -+#define DPDCEI_CMDID_SET_IRQ 0x010 -+#define DPDCEI_CMDID_GET_IRQ 0x011 -+#define DPDCEI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDCEI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDCEI_CMDID_SET_IRQ_MASK 0x014 -+#define DPDCEI_CMDID_GET_IRQ_MASK 0x015 -+#define DPDCEI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDCEI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDCEI_CMDID_SET_RX_QUEUE 0x1B0 -+#define DPDCEI_CMDID_GET_RX_QUEUE 0x1B1 -+#define DPDCEI_CMDID_GET_TX_QUEUE 0x1B2 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_OPEN(cmd, dpdcei_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdcei_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdcei_engine, cfg->engine);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priority);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, 
status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, enum dpdcei_engine, attr->engine); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdcei_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdcei_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPDCEI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai.h b/drivers/net/dpaa2/mc/fsl_dpdmai.h -new file mode 100644 -index 0000000..e931ce1 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmai.h -@@ -0,0 +1,521 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDMAI_H -+#define __FSL_DPDMAI_H -+ -+struct fsl_mc_io; -+ -+/* Data Path DMA Interface API -+ * Contains initialization APIs and runtime control APIs for DPDMAI -+ */ -+ -+/* General DPDMAI macros */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPDMAI object -+ */ -+#define DPDMAI_PRIO_NUM 2 -+ -+/** -+ * All queues considered; see dpdmai_set_rx_queue() -+ */ -+#define DPDMAI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpdmai_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdmai_id: DPDMAI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdmai_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmai_id, -+ uint16_t *token); -+ -+/** -+ * dpdmai_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmai_cfg - Structure representing DPDMAI configuration -+ * @priorities: Priorities for the DMA hardware processing; valid priorities are -+ * configured with values 1-8; the entry following last valid entry -+ * should be configured with 0 -+ */ -+struct dpdmai_cfg { -+ uint8_t priorities[DPDMAI_PRIO_NUM]; -+}; -+ -+/** -+ * dpdmai_create() - Create the DPDMAI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPDMAI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpdmai_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. 
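As an illustrative sketch only (not part of the patch), the control-session flow described for dpdmai_open()/dpdmai_close() above, together with the enable/disable calls declared just below, could be exercised as follows; the MC portal mc_io, the object ID, and the use of 0 for cmd_flags are assumptions for the example:

#include <fsl_dpdmai.h>	/* assumed include path for the header above */

static int dpdmai_session_example(struct fsl_mc_io *mc_io, int dpdmai_id)
{
	uint16_t token;
	int err;

	/* Open a control session; the returned token authenticates
	 * every later command on this object through this portal. */
	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
	if (err)
		return err;

	/* Allow the object to send and receive frames. */
	err = dpdmai_enable(mc_io, 0, token);

	/* ... datapath work would go here ... */

	/* Stop traffic and release the control session. */
	dpdmai_disable(mc_io, 0, token);
	dpdmai_close(mc_io, 0, token);
	return err;
}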
-+ */ -+int dpdmai_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmai_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmai_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmai_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_get_irq() - Get IRQ information from the DPDMAI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
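Purely as an illustration (not from the patch), programming a message-based interrupt with the struct dpdmai_irq_cfg described above might look like the sketch below; the MSI address/data values, irq_index 0, and cmd_flags of 0 are placeholders:

static int dpdmai_irq_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				    uint64_t msi_addr, uint32_t msi_data)
{
	struct dpdmai_irq_cfg irq_cfg;

	/* A write of 'val' to 'addr' signals this interrupt. */
	irq_cfg.addr = msi_addr;
	irq_cfg.val = msi_data;
	irq_cfg.irq_num = 0;	/* user-defined number for this IRQ */

	return dpdmai_set_irq(mc_io, 0, token, 0 /* irq_index */, &irq_cfg);
}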
-+ */ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdmai_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdmai_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdmai_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdmai_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
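A hypothetical sketch (not part of the patch) that ties together the enable/mask/status calls documented above, plus the write-one-to-clear acknowledgement described below for dpdmai_clear_irq_status(); the irq_index and the single cause bit are assumptions:

#define EXAMPLE_IRQ_INDEX	0		/* assumed interrupt line */
#define EXAMPLE_CAUSE_BIT	0x00000001	/* hypothetical cause of interest */

static int dpdmai_irq_poll_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	int err;

	/* Unmask only the cause of interest, then enable the line. */
	err = dpdmai_set_irq_mask(mc_io, 0, token, EXAMPLE_IRQ_INDEX,
				  EXAMPLE_CAUSE_BIT);
	if (!err)
		err = dpdmai_set_irq_enable(mc_io, 0, token,
					    EXAMPLE_IRQ_INDEX, 1);
	if (err)
		return err;

	/* Read pending causes (one bit per cause) and acknowledge ours. */
	err = dpdmai_get_irq_status(mc_io, 0, token, EXAMPLE_IRQ_INDEX, &status);
	if (!err && (status & EXAMPLE_CAUSE_BIT))
		err = dpdmai_clear_irq_status(mc_io, 0, token,
					      EXAMPLE_IRQ_INDEX,
					      EXAMPLE_CAUSE_BIT);
	return err;
}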
-+ */ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmai_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmai_attr - Structure representing DPDMAI attributes -+ * @id: DPDMAI object ID -+ * @version: DPDMAI version -+ * @num_of_priorities: number of priorities -+ */ -+struct dpdmai_attr { -+ int id; -+ /** -+ * struct version - DPDMAI version -+ * @major: DPDMAI major version -+ * @minor: DPDMAI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmai_attr *attr); -+ -+/** -+ * enum dpdmai_dest - DPDMAI destination types -+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is expected to dequeue -+ * from the queue based on polling or other user-defined method -+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpdmai_dest { -+ DPDMAI_DEST_NONE = 0, -+ DPDMAI_DEST_DPIO = 1, -+ DPDMAI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPDMAI_DEST_NONE' option -+ */ -+struct dpdmai_dest_cfg { -+ enum dpdmai_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPDMAI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPDMAI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPDMAI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained 
in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpdmai_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ -+}; -+ -+/** -+ * dpdmai_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation; use -+ * DPDMAI_ALL_QUEUES to configure all Rx queues -+ * identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpdmai_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_rx_queue_attr *attr); -+ -+/** -+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware -+ */ -+ -+struct dpdmai_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPDMAI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h -new file mode 100644 -index 0000000..7c4a31a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h -@@ -0,0 +1,191 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
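To illustrate the Rx-queue configuration above (this sketch is not part of the patch), the snippet below points every Rx queue at a DPIO and sets a user context, then reads back the priority-0 queue to learn its FQID; the dpio_id and user_ctx values are assumed inputs:

static int dpdmai_rx_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				   int dpio_id, uint64_t user_ctx)
{
	struct dpdmai_rx_queue_cfg cfg = { 0 };
	struct dpdmai_rx_queue_attr attr;
	int err;

	/* Modify both the user context and the destination. */
	cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST;
	cfg.user_ctx = user_ctx;		/* returned in each dequeued frame descriptor */
	cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
	cfg.dest_cfg.dest_id = dpio_id;		/* FQDAN notifications go to this DPIO */
	cfg.dest_cfg.priority = 0;

	/* Apply the same settings to all Rx queues. */
	err = dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &cfg);
	if (err)
		return err;

	/* Learn the virtual FQID to use for dequeue operations. */
	return dpdmai_get_rx_queue(mc_io, 0, token, 0, &attr);
}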
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMAI_CMD_H -+#define _FSL_DPDMAI_CMD_H -+ -+/* DPDMAI Version */ -+#define DPDMAI_VER_MAJOR 2 -+#define DPDMAI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPDMAI_CMDID_CLOSE 0x800 -+#define DPDMAI_CMDID_OPEN 0x80E -+#define DPDMAI_CMDID_CREATE 0x90E -+#define DPDMAI_CMDID_DESTROY 0x900 -+ -+#define DPDMAI_CMDID_ENABLE 0x002 -+#define DPDMAI_CMDID_DISABLE 0x003 -+#define DPDMAI_CMDID_GET_ATTR 0x004 -+#define DPDMAI_CMDID_RESET 0x005 -+#define DPDMAI_CMDID_IS_ENABLED 0x006 -+ -+#define DPDMAI_CMDID_SET_IRQ 0x010 -+#define DPDMAI_CMDID_GET_IRQ 0x011 -+#define DPDMAI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDMAI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDMAI_CMDID_SET_IRQ_MASK 0x014 -+#define DPDMAI_CMDID_GET_IRQ_MASK 0x015 -+#define DPDMAI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDMAI_CMDID_SET_RX_QUEUE 0x1A0 -+#define DPDMAI_CMDID_GET_RX_QUEUE 0x1A1 -+#define DPDMAI_CMDID_GET_TX_QUEUE 0x1A2 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 
32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPDMAI_CMD_H */ -diff --git 
a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h -new file mode 100644 -index 0000000..455a042 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h -@@ -0,0 +1,724 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDMUX_H -+#define __FSL_DPDMUX_H -+ -+#include -+ -+struct fsl_mc_io; -+ -+/* Data Path Demux API -+ * Contains API for handling DPDMUX topology and functionality -+ */ -+ -+/** -+ * dpdmux_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdmux_id: DPDMUX unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdmux_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token); -+ -+/** -+ * dpdmux_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPDMUX general options -+ */ -+ -+/** -+ * Enable bridging between internal interfaces -+ */ -+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL -+ -+#define DPDMUX_IRQ_INDEX_IF 0x0000 -+#define DPDMUX_IRQ_INDEX 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * enum dpdmux_manip - DPDMUX manipulation operations -+ * @DPDMUX_MANIP_NONE: No manipulation on frames -+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress -+ */ -+enum dpdmux_manip { -+ DPDMUX_MANIP_NONE = 0x0, -+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1 -+}; -+ -+/** -+ * enum dpdmux_method - DPDMUX method options -+ * @DPDMUX_METHOD_NONE: no DPDMUX method -+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address -+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address -+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN -+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN -+ */ -+enum dpdmux_method { -+ DPDMUX_METHOD_NONE = 0x0, -+ DPDMUX_METHOD_C_VLAN_MAC = 0x1, -+ DPDMUX_METHOD_MAC = 0x2, -+ DPDMUX_METHOD_C_VLAN = 0x3, -+ DPDMUX_METHOD_S_VLAN = 0x4 -+}; -+ -+/** -+ * struct dpdmux_cfg - DPDMUX configuration parameters -+ * @method: Defines the operation method for the DPDMUX address table -+ * @manip: Required manipulation operation -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpdmux_cfg { -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_' flags -+ * @max_dmat_entries: Maximum entries in DPDMUX address table -+ * 0 - indicates default: 64 entries per interface. -+ * @max_mc_groups: Number of multicast groups in DPDMUX table -+ * 0 - indicates default: 32 multicast groups -+ * @max_vlan_ids: max vlan ids allowed in the system - -+ * relevant only case of working in mac+vlan method. -+ * 0 - indicates default 16 vlan ids. -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_dmat_entries; -+ uint16_t max_mc_groups; -+ uint16_t max_vlan_ids; -+ } adv; -+}; -+ -+/** -+ * dpdmux_create() - Create the DPDMUX object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPDMUX object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpdmux_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_enable() - Enable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_disable() - Disable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmux_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmux_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_get_irq() - Get IRQ information from the DPDMUX. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdmux_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdmux_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdmux_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmux_attr - Structure representing DPDMUX attributes -+ * @id: DPDMUX object ID -+ * @version: DPDMUX version -+ * @options: Configuration options (bitmap) -+ * @method: DPDMUX address table method -+ * @manip: DPDMUX manipulation type -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @mem_size: DPDMUX frame storage memory size -+ */ -+struct dpdmux_attr { -+ int id; -+ /** -+ * struct version - DPDMUX version -+ * @major: DPDMUX major version -+ * @minor: DPDMUX minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+}; -+ -+/** -+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr); -+ -+/** -+ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @max_frame_length: The required maximum frame length -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * enum dpdmux_counter_type - Counter types -+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames -+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes -+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames -+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames -+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes -+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ */ -+enum dpdmux_counter_type { -+ DPDMUX_CNT_ING_FRAME = 0x0, -+ DPDMUX_CNT_ING_BYTE = 0x1, -+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2, -+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, -+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4, -+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5, -+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6, -+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7, -+ DPDMUX_CNT_EGR_FRAME = 0x8, -+ DPDMUX_CNT_EGR_BYTE = 0x9, -+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * enum dpdmux_accepted_frames_type - DPDMUX frame types -+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority-tagged frames -+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * priority-tagged frames that are received on this -+ * interface -+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames -+ * received on this interface are accepted -+ */ -+enum dpdmux_accepted_frames_type { -+ DPDMUX_ADMIT_ALL = 0, -+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, -+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2 -+}; -+ -+/** -+ * enum dpdmux_action - DPDMUX action for un-accepted frames -+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames -+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the -+ * control interface -+ */ -+enum dpdmux_action { -+ DPDMUX_ACTION_DROP = 0, -+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 -+}; -+ -+/** -+ * struct dpdmux_accepted_frames - Frame types configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: Defines action on frames not accepted -+ */ -+struct dpdmux_accepted_frames { -+ enum dpdmux_accepted_frames_type type; -+ enum dpdmux_action unaccept_act; -+}; -+ -+/** -+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @cfg: Frame types configuration -+ * -+ * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or -+ * priority-tagged frames are discarded. -+ * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or -+ * priority-tagged frames are accepted. -+ * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, -+ * untagged and priority-tagged frame are accepted; -+ * -+ * Return: '0' on Success; Error code otherwise. 
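As a sketch only (not part of the patch), restricting a downlink interface to VLAN-tagged traffic with the frame types and actions defined above could look like this; if_id and the cmd_flags value of 0 are assumptions:

static int dpdmux_admit_vlan_only_example(struct fsl_mc_io *mc_io,
					  uint16_t token, uint16_t if_id)
{
	struct dpdmux_accepted_frames cfg;

	/* Accept only VLAN-tagged frames; drop untagged and
	 * priority-tagged frames instead of redirecting them. */
	cfg.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED;
	cfg.unaccept_act = DPDMUX_ACTION_DROP;

	return dpdmux_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}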
-+ */ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg); -+ -+/** -+ * struct dpdmux_if_attr - Structure representing frame types configuration -+ * @rate: Configured interface rate (in bits per second) -+ * @enabled: Indicates if interface is enabled -+ * @accept_frame_type: Indicates type of accepted frames for the interface -+ */ -+struct dpdmux_if_attr { -+ uint32_t rate; -+ int enabled; -+ enum dpdmux_accepted_frames_type accept_frame_type; -+}; -+ -+/** -+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @attr: Interface attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr); -+ -+/** -+ * struct dpdmux_l2_rule - Structure representing L2 rule -+ * @mac_addr: MAC address -+ * @vlan_id: VLAN ID -+ */ -+struct dpdmux_l2_rule { -+ uint8_t mac_addr[6]; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function removes a L2 rule from DPDMUX table -+ * or adds an interface to an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function adds a L2 rule into DPDMUX table -+ * or adds an interface to an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+* dpdmux_if_get_counter() - Functions obtains specific counter of an interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* @if_id: Interface Id -+* @counter_type: counter type -+* @counter: Returned specific counter information -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter); -+ -+/** -+* dpdmux_ul_reset_counters() - Function resets the uplink counter -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* -+* Return: '0' on Success; Error code otherwise. 
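The L2-rule and counter calls above could be combined as in this hypothetical sketch (not part of the patch); the MAC address and VLAN ID are made-up example values:

static int dpdmux_l2_steer_example(struct fsl_mc_io *mc_io, uint16_t token,
				   uint16_t if_id)
{
	/* Hypothetical locally-administered MAC + VLAN to steer to if_id. */
	struct dpdmux_l2_rule rule = {
		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		.vlan_id = 100,
	};
	uint64_t frames = 0;
	int err;

	err = dpdmux_if_add_l2_rule(mc_io, 0, token, if_id, &rule);
	if (err)
		return err;

	/* Read the ingress frame counter of the same interface. */
	return dpdmux_if_get_counter(mc_io, 0, token, if_id,
				     DPDMUX_CNT_ING_FRAME, &frames);
}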
-+*/ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ */ -+struct dpdmux_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpdmux_if_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg); -+/** -+ * struct dpdmux_link_state - Structure representing DPDMUX link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ * @up: 0 - down, 1 - up -+ */ -+struct dpdmux_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpdmux_if_get_link_state - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @state: link state -+ * -+ * @returns '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state); -+ -+#endif /* __FSL_DPDMUX_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h -new file mode 100644 -index 0000000..0a5cf17 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h -@@ -0,0 +1,256 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
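Purely as an illustration (not part of the patch), the link-configuration API above might be driven as follows; the requested rate is left as a caller-supplied value because this header does not spell out its unit convention:

static int dpdmux_link_example(struct fsl_mc_io *mc_io, uint16_t token,
			       uint16_t if_id, uint32_t rate)
{
	struct dpdmux_link_cfg cfg;
	struct dpdmux_link_state state;
	int err;

	cfg.rate = rate;			/* requested link rate */
	cfg.options = DPDMUX_LINK_OPT_AUTONEG;	/* enable auto-negotiation */
	err = dpdmux_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
	if (err)
		return err;

	/* state.up reads 1 once the link is reported up. */
	return dpdmux_if_get_link_state(mc_io, 0, token, if_id, &state);
}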
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMUX_CMD_H -+#define _FSL_DPDMUX_CMD_H -+ -+/* DPDMUX Version */ -+#define DPDMUX_VER_MAJOR 5 -+#define DPDMUX_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPDMUX_CMDID_CLOSE 0x800 -+#define DPDMUX_CMDID_OPEN 0x806 -+#define DPDMUX_CMDID_CREATE 0x906 -+#define DPDMUX_CMDID_DESTROY 0x900 -+ -+#define DPDMUX_CMDID_ENABLE 0x002 -+#define DPDMUX_CMDID_DISABLE 0x003 -+#define DPDMUX_CMDID_GET_ATTR 0x004 -+#define DPDMUX_CMDID_RESET 0x005 -+#define DPDMUX_CMDID_IS_ENABLED 0x006 -+ -+#define DPDMUX_CMDID_SET_IRQ 0x010 -+#define DPDMUX_CMDID_GET_IRQ 0x011 -+#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDMUX_CMDID_SET_IRQ_MASK 0x014 -+#define DPDMUX_CMDID_GET_IRQ_MASK 0x015 -+#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1 -+ -+#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3 -+ -+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7 -+#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8 -+ -+#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0 -+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1 -+#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2 -+#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3 -+#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\ -+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) \ -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+#define DPDMUX_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\ -+ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_unaccepted_frames_action, \ -+ cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \ -+ attr->accept_frame_type);\ -+ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, 
l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+#endif /* _FSL_DPDMUX_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpio.h b/drivers/net/dpaa2/mc/fsl_dpio.h -new file mode 100644 -index 0000000..88a492f ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpio.h -@@ -0,0 +1,460 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPIO_H -+#define __FSL_DPIO_H -+ -+/* Data Path I/O Portal API -+ * Contains initialization APIs and runtime control APIs for DPIO -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpio_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpio_id: DPIO unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpio_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token); -+ -+/** -+ * dpio_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpio_channel_mode - DPIO notification channel mode -+ * @DPIO_NO_CHANNEL: No support for notification channel -+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a -+ * dedicated channel in the DPIO; user should point the queue's -+ * destination in the relevant interface to this DPIO -+ */ -+enum dpio_channel_mode { -+ DPIO_NO_CHANNEL = 0, -+ DPIO_LOCAL_CHANNEL = 1, -+}; -+ -+/** -+ * struct dpio_cfg - Structure representing DPIO configuration -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ */ -+struct dpio_cfg { -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpio_create() - Create the DPIO object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPIO object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpio_open() function to get an authentication -+ * token first. 
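[Editor's note: a minimal usage sketch of the control-session flow described above: open the DPIO by its id to obtain the authentication token, enable the portal, and drop the session again if enabling fails. The MC portal object (struct fsl_mc_io) is assumed to be set up by the caller, cmd_flags is passed as 0 for brevity, and the helper name is illustrative.]

#include <stdint.h>
#include "fsl_dpio.h"

/* Open an existing DPIO (e.g. one declared in the DPL), enable it, and
 * hand the token back for use in all subsequent commands. */
static int dpio_bring_up(struct fsl_mc_io *mc_io, int dpio_id, uint16_t *token)
{
	int err;

	err = dpio_open(mc_io, 0, dpio_id, token);
	if (err)
		return err;

	err = dpio_enable(mc_io, 0, *token);
	if (err)
		dpio_close(mc_io, 0, *token);	/* tear the session down on failure */

	return err;
}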
-+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpio_destroy() - Destroy the DPIO object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_enable() - Enable the DPIO, allow I/O portal operations. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_is_enabled() - Check if the DPIO is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpio_reset() - Reset the DPIO, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_set_stashing_destination() - Set the stashing destination. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest); -+ -+/** -+ * dpio_get_stashing_destination() - Get the stashing destination.. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: Returns the stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest); -+ -+/** -+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * @channel_index: Returned channel index to be used in qbman API -+ * -+ * Return: '0' on Success; Error code otherwise. 
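[Editor's note: a sketch combining the two portal-tuning calls documented above: setting the stashing destination and binding a DPCON as a static dequeue channel. The sdest value of 1 is purely illustrative and platform specific, cmd_flags is 0, and the helper name is the editor's.]

#include <stdint.h>
#include "fsl_dpio.h"

/* Direct stash transactions to a given destination and attach a DPCON so
 * its notifications can be dequeued through this DPIO; the returned
 * channel_index is what the qbman dequeue interface expects. */
static int dpio_tune_portal(struct fsl_mc_io *mc_io, uint16_t token,
			    int dpcon_id, uint8_t *channel_index)
{
	int err;

	err = dpio_set_stashing_destination(mc_io, 0, token, 1 /* sdest */);
	if (err)
		return err;

	return dpio_add_static_dequeue_channel(mc_io, 0, token, dpcon_id,
					       channel_index);
}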
-+ */ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index); -+ -+/** -+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id); -+ -+/** -+ * DPIO IRQ Index and Events -+ */ -+ -+/** -+ * Irq software-portal index -+ */ -+#define DPIO_IRQ_SWP_INDEX 0 -+ -+/** -+ * struct dpio_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpio_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_get_irq() - Get IRQ information from the DPIO. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpio_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpio_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpio_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpio_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpio_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
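[Editor's note: the IRQ calls above all follow one model: up to 32 causes per interrupt index, a per-cause mask, an overall enable, and a write-1-to-clear status word. A sketch of that flow for the software-portal interrupt index; the all-ones mask, cmd_flags of 0, and helper name are illustrative choices.]

#include <stdint.h>
#include "fsl_dpio.h"

/* Unmask every cause, enable the interrupt, then (typically from the
 * handler) read the pending causes and acknowledge them by writing the
 * same bits back. */
static int dpio_irq_setup_and_ack(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	int err;

	err = dpio_set_irq_mask(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 0xffffffff);
	if (err)
		return err;

	err = dpio_set_irq_enable(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 1);
	if (err)
		return err;

	err = dpio_get_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, &status);
	if (err)
		return err;

	return dpio_clear_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, status);
}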
-+ */ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpio_attr - Structure representing DPIO attributes -+ * @id: DPIO object ID -+ * @version: DPIO version -+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area -+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area -+ * @qbman_portal_id: Software portal ID -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ * @qbman_version: QBMAN version -+ */ -+struct dpio_attr { -+ int id; -+ /** -+ * struct version - DPIO version -+ * @major: DPIO major version -+ * @minor: DPIO minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t qbman_portal_ce_offset; -+ uint64_t qbman_portal_ci_offset; -+ uint16_t qbman_portal_id; -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+ uint32_t qbman_version; -+}; -+ -+/** -+ * dpio_get_attributes() - Retrieve DPIO attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr); -+#endif /* __FSL_DPIO_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpio_cmd.h b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h -new file mode 100644 -index 0000000..f339cd6 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h -@@ -0,0 +1,184 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPIO_CMD_H -+#define _FSL_DPIO_CMD_H -+ -+/* DPIO Version */ -+#define DPIO_VER_MAJOR 3 -+#define DPIO_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPIO_CMDID_CLOSE 0x800 -+#define DPIO_CMDID_OPEN 0x803 -+#define DPIO_CMDID_CREATE 0x903 -+#define DPIO_CMDID_DESTROY 0x900 -+ -+#define DPIO_CMDID_ENABLE 0x002 -+#define DPIO_CMDID_DISABLE 0x003 -+#define DPIO_CMDID_GET_ATTR 0x004 -+#define DPIO_CMDID_RESET 0x005 -+#define DPIO_CMDID_IS_ENABLED 0x006 -+ -+#define DPIO_CMDID_SET_IRQ 0x010 -+#define DPIO_CMDID_GET_IRQ 0x011 -+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPIO_CMDID_SET_IRQ_MASK 0x014 -+#define DPIO_CMDID_GET_IRQ_MASK 0x015 -+#define DPIO_CMDID_GET_IRQ_STATUS 0x016 -+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPIO_CMDID_SET_STASHING_DEST 0x120 -+#define DPIO_CMDID_GET_STASHING_DEST 0x121 -+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 -+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_OPEN(cmd, dpio_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ -+ cfg->channel_mode);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ -+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+#endif /* _FSL_DPIO_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h -new file mode 100644 -index 0000000..b2bceaf ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h -@@ -0,0 +1,174 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPKG_H_ -+#define __FSL_DPKG_H_ -+ -+#include -+ -+/* Data Path Key Generator API -+ * Contains initialization APIs and runtime APIs for the Key Generator -+ */ -+ -+/** Key Generator properties */ -+ -+/** -+ * Number of masks per key extraction -+ */ -+#define DPKG_NUM_OF_MASKS 4 -+/** -+ * Number of extractions per key profile -+ */ -+#define DPKG_MAX_NUM_OF_EXTRACTS 10 -+ -+/** -+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types -+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset -+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field -+ * @DPKG_FULL_FIELD: Extract a full field -+ */ -+enum dpkg_extract_from_hdr_type { -+ DPKG_FROM_HDR = 0, -+ DPKG_FROM_FIELD = 1, -+ DPKG_FULL_FIELD = 2 -+}; -+ -+/** -+ * enum dpkg_extract_type - Enumeration for selecting extraction type -+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header -+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header -+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; -+ * e.g. 
can be used to extract header existence; -+ * please refer to 'Parse Result definition' section in the parser BG -+ */ -+enum dpkg_extract_type { -+ DPKG_EXTRACT_FROM_HDR = 0, -+ DPKG_EXTRACT_FROM_DATA = 1, -+ DPKG_EXTRACT_FROM_PARSE = 3 -+}; -+ -+/** -+ * struct dpkg_mask - A structure for defining a single extraction mask -+ * @mask: Byte mask for the extracted content -+ * @offset: Offset within the extracted content -+ */ -+struct dpkg_mask { -+ uint8_t mask; -+ uint8_t offset; -+}; -+ -+/** -+ * struct dpkg_extract - A structure for defining a single extraction -+ * @type: Determines how the union below is interpreted: -+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; -+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; -+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' -+ * @extract: Selects extraction method -+ * @num_of_byte_masks: Defines the number of valid entries in the array below; -+ * This is also the number of bytes to be used as masks -+ * @masks: Masks parameters -+ */ -+struct dpkg_extract { -+ enum dpkg_extract_type type; -+ /** -+ * union extract - Selects extraction method -+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ */ -+ union { -+ /** -+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @prot: Any of the supported headers -+ * @type: Defines the type of header extraction: -+ * DPKG_FROM_HDR: use size & offset below; -+ * DPKG_FROM_FIELD: use field, size and offset below; -+ * DPKG_FULL_FIELD: use field below -+ * @field: One of the supported fields (NH_FLD_) -+ * -+ * @size: Size in bytes -+ * @offset: Byte offset -+ * @hdr_index: Clear for cases not listed below; -+ * Used for protocols that may have more than a single -+ * header, 0 indicates an outer header; -+ * Supported protocols (possible values): -+ * NET_PROT_VLAN (0, HDR_INDEX_LAST); -+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); -+ * NET_PROT_IP(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv4(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv6(0, HDR_INDEX_LAST); -+ */ -+ -+ struct { -+ enum net_prot prot; -+ enum dpkg_extract_from_hdr_type type; -+ uint32_t field; -+ uint8_t size; -+ uint8_t offset; -+ uint8_t hdr_index; -+ } from_hdr; -+ /** -+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_data; -+ -+ /** -+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_parse; -+ } extract; -+ -+ uint8_t num_of_byte_masks; -+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; -+}; -+ -+/** -+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation -+ * profile (rule) -+ * @num_extracts: Defines the number of valid entries in the array below -+ * @extracts: Array of required extractions -+ */ -+struct dpkg_profile_cfg { -+ uint8_t num_extracts; -+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; -+}; -+ -+#endif /* __FSL_DPKG_H_ */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmac.h b/drivers/net/dpaa2/mc/fsl_dpmac.h -new file mode 100644 -index 0000000..ad27772 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmac.h -@@ -0,0 +1,593 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
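[Editor's note: a sketch of filling in the key-generation profile described above, with a single full-field extraction from the outer IP header. NET_PROT_IP is listed as a supported protocol in the comments above; NH_FLD_IP_SRC is assumed to come from the net-header definitions pulled in by this file (the include target is elided in the diff), and the helper name is illustrative.]

#include <stdint.h>
#include "fsl_dpkg.h"

/* One extraction: the full IP source field of the outer header, with no
 * byte masks applied. */
static void build_ip_src_profile(struct dpkg_profile_cfg *kg_cfg)
{
	kg_cfg->num_extracts = 1;
	kg_cfg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg->extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg->extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;	/* assumed field name */
	kg_cfg->extracts[0].extract.from_hdr.hdr_index = 0;	/* outer header */
	kg_cfg->extracts[0].num_of_byte_masks = 0;
}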
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. -+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPMAC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPMAC_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * struct dpmac_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmac_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * DPMAC link configuration/state options -+ */ -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpmac_counter - DPMAC counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. 
-+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also frames truncated as a result of -+ * the receive FIFO overflow. -+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error -+ * (optional used for wrong SFD). -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that was less than 64 -+ * bytes long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC) -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid -+ * frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received. -+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error -+ * (except for undersized/fragment frame). -+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including -+ * pause frames. -+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including -+ * pause frames. 
-+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME, -+ DPMAC_CNT_ENG_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. -+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h -new file mode 100644 -index 0000000..dc00590 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h -@@ -0,0 +1,195 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
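[Editor's note: a sketch of reading one of the MAC statistics enumerated above through dpmac_get_counter(); the token is assumed to come from dpmac_open() on the MAC of interest, cmd_flags is 0, and the helper name and printing are illustrative.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "fsl_dpmac.h"

/* Read and print the ingress byte counter of an already-opened DPMAC. */
static int dpmac_print_rx_bytes(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint64_t bytes = 0;
	int err;

	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, &bytes);
	if (err)
		return err;

	printf("DPMAC RX bytes: %" PRIu64 "\n", bytes);
	return 0;
}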
-+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) 
\ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp.h b/drivers/net/dpaa2/mc/fsl_dpmcp.h -new file mode 100644 -index 0000000..80f238e ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmcp.h -@@ -0,0 +1,332 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMCP_H -+#define __FSL_DPMCP_H -+ -+/* Data Path Management Command Portal API -+ * Contains initialization APIs and runtime control APIs for DPMCP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmcp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmcp_id: DPMCP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmcp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token); -+ -+/** -+ * Get portal ID from pool -+ */ -+#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) -+ -+/** -+ * dpmcp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpmcp_cfg - Structure representing DPMCP configuration -+ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID -+ * from pool -+ */ -+struct dpmcp_cfg { -+ int portal_id; -+}; -+ -+/** -+ * dpmcp_create() - Create the DPMCP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMCP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmcp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * IRQ -+ */ -+ -+/** -+ * IRQ Index -+ */ -+#define DPMCP_IRQ_INDEX 0 -+/** -+ * irq event - Indicates that the link state changed -+ */ -+#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 -+ -+/** -+ * struct dpmcp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmcp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_get_irq() - Get IRQ information from the DPMCP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. 
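For orientation only (this sketch is not part of the patch), the dpmcp_open()/dpmcp_reset()/dpmcp_close() calls declared above compose into a short control-session bracket. It assumes the header is reachable as "fsl_dpmcp.h", passes 0 for cmd_flags (no MC_CMD_FLAG_ bits), and the helper name is made up.

#include <stdint.h>
#include "fsl_dpmcp.h"   /* assumed include path for the header above */

/* Open a control session for an existing DPMCP, reset it, and close the
 * session again.  Purely illustrative; error handling is minimal. */
static int dpmcp_reset_by_id(struct fsl_mc_io *mc_io, int dpmcp_id)
{
        uint16_t token = 0;
        int err;

        err = dpmcp_open(mc_io, 0 /* cmd_flags */, dpmcp_id, &token);
        if (err)
                return err;

        err = dpmcp_reset(mc_io, 0, token);

        /* Always release the authentication token. */
        (void)dpmcp_close(mc_io, 0, token);
        return err;
}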
-+ */ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmcp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmcp_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmcp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * struct dpmcp_attr - Structure representing DPMCP attributes -+ * @id: DPMCP object ID -+ * @version: DPMCP version -+ */ -+struct dpmcp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPMCP version -+ * @major: DPMCP major version -+ * @minor: DPMCP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmcp_get_attributes - Retrieve DPMCP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr); -+ -+#endif /* __FSL_DPMCP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h -new file mode 100644 -index 0000000..8f710bd ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h -@@ -0,0 +1,135 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPMCP_CMD_H -+#define _FSL_DPMCP_CMD_H -+ -+/* DPMCP Version */ -+#define DPMCP_VER_MAJOR 3 -+#define DPMCP_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMCP_CMDID_CLOSE 0x800 -+#define DPMCP_CMDID_OPEN 0x80b -+#define DPMCP_CMDID_CREATE 0x90b -+#define DPMCP_CMDID_DESTROY 0x900 -+ -+#define DPMCP_CMDID_GET_ATTR 0x004 -+#define DPMCP_CMDID_RESET 0x005 -+ -+#define DPMCP_CMDID_SET_IRQ 0x010 -+#define DPMCP_CMDID_GET_IRQ 0x011 -+#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMCP_CMDID_SET_IRQ_MASK 0x014 -+#define DPMCP_CMDID_GET_IRQ_MASK 0x015 -+#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_OPEN(cmd, dpmcp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmcp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->portal_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+#endif /* _FSL_DPMCP_CMD_H */ -diff --git 
a/drivers/net/dpaa2/mc/fsl_dpmng.h b/drivers/net/dpaa2/mc/fsl_dpmng.h -new file mode 100644 -index 0000000..4468dea ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmng.h -@@ -0,0 +1,74 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_H -+#define __FSL_DPMNG_H -+ -+/* Management Complex General API -+ * Contains general API for the Management Complex firmware -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Management Complex firmware version information -+ */ -+#define MC_VER_MAJOR 9 -+#define MC_VER_MINOR 0 -+ -+/** -+ * struct mc_versoin -+ * @major: Major version number: incremented on API compatibility changes -+ * @minor: Minor version number: incremented on API additions (that are -+ * backward compatible); reset when major version is incremented -+ * @revision: Internal revision number: incremented on implementation changes -+ * and/or bug fixes that have no impact on API -+ */ -+struct mc_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * mc_get_version() - Retrieves the Management Complex firmware -+ * version information -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_ver_info: Returned version information structure -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info); -+ -+#endif /* __FSL_DPMNG_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h -new file mode 100644 -index 0000000..c34ca3a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h -@@ -0,0 +1,46 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_CMD_H -+#define __FSL_DPMNG_CMD_H -+ -+/* Command IDs */ -+#define DPMNG_CMDID_GET_VERSION 0x831 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \ -+} while (0) -+ -+#endif /* __FSL_DPMNG_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h -new file mode 100644 -index 0000000..c820086 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpni.h -@@ -0,0 +1,2581 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
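As a side note, mc_get_version() and the MC_VER_* constants from fsl_dpmng.h above lend themselves to a small start-up sanity check. The sketch below is illustrative only; the include path, the 0 cmd_flags value and the helper name are assumptions, not part of the removed code.

#include <inttypes.h>
#include <stdio.h>
#include "fsl_dpmng.h"   /* assumed include path for the header above */

/* Query the MC firmware version and flag a major-version mismatch against
 * the MC_VER_MAJOR/MC_VER_MINOR constants.  Illustrative only. */
static int check_mc_version(struct fsl_mc_io *mc_io)
{
        struct mc_version ver = { 0 };
        int err;

        err = mc_get_version(mc_io, 0 /* cmd_flags */, &ver);
        if (err)
                return err;

        printf("MC firmware %" PRIu32 ".%" PRIu32 " (rev %" PRIu32 ")\n",
               ver.major, ver.minor, ver.revision);

        if (ver.major != MC_VER_MAJOR)
                printf("warning: headers target MC %d.%d\n",
                       MC_VER_MAJOR, MC_VER_MINOR);
        return 0;
}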
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPNI_H -+#define __FSL_DPNI_H -+ -+#include -+ -+struct fsl_mc_io; -+ -+/** -+ * Data Path Network Interface API -+ * Contains initialization APIs and runtime control APIs for DPNI -+ */ -+ -+/** General DPNI macros */ -+ -+/** -+ * Maximum number of traffic classes -+ */ -+#define DPNI_MAX_TC 8 -+/** -+ * Maximum number of buffer pools per DPNI -+ */ -+#define DPNI_MAX_DPBP 8 -+/** -+ * Maximum number of storage-profiles per DPNI -+ */ -+#define DPNI_MAX_SP 2 -+ -+/** -+ * All traffic classes considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) -+/** -+ * Generate new flow ID; see dpni_set_tx_flow() -+ */ -+#define DPNI_NEW_FLOW_ID (uint16_t)(-1) -+/* use for common tx-conf queue; see dpni_set_tx_conf_() */ -+#define DPNI_COMMON_TX_CONF (uint16_t)(-1) -+ -+/** -+ * dpni_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpni_id: DPNI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpni_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token); -+ -+/** -+ * dpni_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* DPNI configuration options */ -+ -+/** -+ * Allow different distribution key profiles for different traffic classes; -+ * if not set, a single key profile is assumed -+ */ -+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 -+ -+/** -+ * Disable all non-error transmit confirmation; error frames are reported -+ * back to a common Tx error queue -+ */ -+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 -+ -+/** -+ * Disable per-sender private Tx confirmation/error queue -+ */ -+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 -+ -+/** -+ * Support distribution based on hashed key; -+ * allows statistical distribution over receive queues in a traffic class -+ */ -+#define DPNI_OPT_DIST_HASH 0x00000010 -+ -+/** -+ * DEPRECATED - if this flag is selected and and all new 'max_fs_entries' are -+ * '0' then backward compatibility is preserved; -+ * Support distribution based on flow steering; -+ * allows explicit control of distribution over receive queues in a traffic -+ * class -+ */ -+#define DPNI_OPT_DIST_FS 0x00000020 -+ -+/** -+ * Unicast filtering support -+ */ -+#define DPNI_OPT_UNICAST_FILTER 0x00000080 -+/** -+ * Multicast filtering support -+ */ -+#define DPNI_OPT_MULTICAST_FILTER 0x00000100 -+/** -+ * VLAN filtering support -+ */ -+#define DPNI_OPT_VLAN_FILTER 0x00000200 -+/** -+ * Support IP reassembly on received packets -+ */ -+#define DPNI_OPT_IPR 0x00000800 -+/** -+ * Support IP fragmentation on transmitted packets -+ */ -+#define DPNI_OPT_IPF 0x00001000 -+/** -+ * VLAN manipulation support -+ */ -+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 -+/** -+ * Support masking of QoS lookup keys -+ */ -+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 -+/** -+ * Support masking of Flow Steering lookup keys -+ */ -+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 -+ -+/** -+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration -+ * @tc_cfg: TCs configuration -+ * @ipr_cfg: IP reassembly configuration -+ */ -+struct dpni_extended_cfg { -+ /** -+ * struct tc_cfg - TC configuration -+ * @max_dist: Maximum distribution size for Rx traffic class; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024; -+ * value '0' will be treated as '1'. -+ * other unsupported values will be round down to the nearest -+ * supported value. 
-+ * @max_fs_entries: Maximum FS entries for Rx traffic class; -+ * '0' means no support for this TC; -+ */ -+ struct { -+ uint16_t max_dist; -+ uint16_t max_fs_entries; -+ } tc_cfg[DPNI_MAX_TC]; -+ /** -+ * struct ipr_cfg - Structure representing IP reassembly configuration -+ * @max_reass_frm_size: Maximum size of the reassembled frame -+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments -+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments -+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly -+ * process -+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly -+ * process -+ */ -+ struct { -+ uint16_t max_reass_frm_size; -+ uint16_t min_frag_size_ipv4; -+ uint16_t min_frag_size_ipv6; -+ uint16_t max_open_frames_ipv4; -+ uint16_t max_open_frames_ipv6; -+ } ipr_cfg; -+}; -+ -+/** -+ * dpni_prepare_extended_cfg() - function prepare extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_create() -+ */ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf); -+ -+/** -+ * struct dpni_cfg - Structure representing DPNI configuration -+ * @mac_addr: Primary MAC address -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpni_cfg { -+ uint8_t mac_addr[6]; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Mask of available options; use 'DPNI_OPT_' values -+ * @start_hdr: Selects the packet starting header for parsing; -+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); -+ * '0' will e treated as '1' -+ * @max_unicast_filters: Maximum number of unicast filters; -+ * '0' is treated as '16' -+ * @max_multicast_filters: Maximum number of multicast filters; -+ * '0' is treated as '64' -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in -+ * the QoS table; '0' is treated as '64' -+ * @max_qos_key_size: Maximum key size for the QoS look-up; -+ * '0' is treated as '24' which is enough for IPv4 -+ * 5-tuple -+ * @max_dist_key_size: Maximum key size for the distribution; -+ * '0' is treated as '24' which is enough for IPv4 5-tuple -+ * @max_policers: Maximum number of policers; -+ * should be between '0' and max_tcs -+ * @max_congestion_ctrl: Maximum number of congestion control groups -+ * (CGs); covers early drop and congestion notification -+ * requirements; -+ * should be between '0' and ('max_tcs' + 'max_senders') -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory -+ * filled with the extended configuration by calling -+ * dpni_prepare_extended_cfg() -+ */ -+ struct { -+ uint32_t options; -+ enum net_prot start_hdr; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+ } adv; -+}; -+ -+/** -+ * dpni_create() - Create the DPNI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one 
or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPNI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpni_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpni_destroy() - Destroy the DPNI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpni_pools_cfg - Structure representing buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpni_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPNI_MAX_DPBP]; -+}; -+ -+/** -+ * dpni_set_pools() - Set buffer pools configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Buffer pools configuration -+ * -+ * mandatory for DPNI operation -+ * warning:Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg); -+ -+/** -+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_is_enabled() - Check if the DPNI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_reset() - Reset the DPNI, returns the object to initial state. 
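For readers unfamiliar with the flow, dpni_set_pools() followed by dpni_enable() is the minimal bring-up sequence sketched below. It is illustrative only: it assumes an already-opened DPNI token, passes 0 for cmd_flags, and the DPBP id and buffer size are caller-supplied placeholders.

#include <stdint.h>
#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Attach a single buffer pool to an opened, still-disabled DPNI and then
 * enable it.  Illustrative only. */
static int dpni_attach_pool_and_enable(struct fsl_mc_io *mc_io,
                                       uint16_t dpni_token,
                                       int dpbp_id, uint16_t buf_size)
{
        struct dpni_pools_cfg pools = { 0 };
        int err, en = 0;

        pools.num_dpbp = 1;
        pools.pools[0].dpbp_id = dpbp_id;
        pools.pools[0].buffer_size = buf_size;
        pools.pools[0].backup_pool = 0;

        /* dpni_set_pools() is only allowed while the DPNI is disabled. */
        err = dpni_set_pools(mc_io, 0 /* cmd_flags */, dpni_token, &pools);
        if (err)
                return err;

        err = dpni_enable(mc_io, 0, dpni_token);
        if (err)
                return err;

        return dpni_is_enabled(mc_io, 0, dpni_token, &en);
}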
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPNI IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPNI_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+ -+/** -+ * struct dpni_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpni_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_get_irq() - Get IRQ information from the DPNI. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state: - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpni_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpni_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpni_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpni_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpni_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
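The IRQ calls above are typically used together; the illustrative sketch below arms only the link-changed cause and shows the check-and-acknowledge pattern for its status bit. The helper names, the 0 cmd_flags value and the assumption of an already-opened DPNI token are not part of the removed code.

#include <stdint.h>
#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Arm the link-state-change interrupt cause of an opened DPNI. */
static int dpni_arm_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        /* Only the link-changed cause should assert the IRQ. */
        err = dpni_set_irq_mask(mc_io, 0 /* cmd_flags */, token,
                                DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
        if (err)
                return err;

        return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}

/* Check for a pending link-changed event and acknowledge it (W1C). */
static int dpni_link_event_pending(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;

        if (dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status))
                return 0;

        if (!(status & DPNI_IRQ_EVENT_LINK_CHANGED))
                return 0;

        (void)dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
                                    DPNI_IRQ_EVENT_LINK_CHANGED);
        return 1;
}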
-+ */ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpni_attr - Structure representing DPNI attributes -+ * @id: DPNI object ID -+ * @version: DPNI version -+ * @start_hdr: Indicates the packet starting header for parsing -+ * @options: Mask of available options; reflects the value as was given in -+ * object's creation -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) -+ * @max_unicast_filters: Maximum number of unicast filters -+ * @max_multicast_filters: Maximum number of multicast filters -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table -+ * @max_qos_key_size: Maximum key size for the QoS look-up -+ * @max_dist_key_size: Maximum key size for the distribution look-up -+ * @max_policers: Maximum number of policers; -+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; -+ * call dpni_extract_extended_cfg() to extract the extended configuration -+ */ -+struct dpni_attr { -+ int id; -+ /** -+ * struct version - DPNI version -+ * @major: DPNI major version -+ * @minor: DPNI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ enum net_prot start_hdr; -+ uint32_t options; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+}; -+ -+/** -+ * dpni_get_attributes() - Retrieve DPNI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
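A small, purely illustrative reader for these attributes is sketched below; it assumes an opened DPNI token, a reachable "fsl_dpni.h" and 0 for cmd_flags.

#include <inttypes.h>
#include <stdio.h>
#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Read back the attributes of an opened DPNI and print a few of them. */
static int dpni_print_attr(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_attr attr = { 0 };
        int err;

        err = dpni_get_attributes(mc_io, 0 /* cmd_flags */, token, &attr);
        if (err)
                return err;

        printf("dpni.%d: API %u.%u, max TCs %u, options 0x%" PRIx32 "\n",
               attr.id, attr.version.major, attr.version.minor,
               attr.max_tcs, attr.options);
        return 0;
}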
-+ */ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr); -+ -+/** -+ * dpni_extract_extended_cfg() - extract the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: 256 bytes of DMA-able memory -+ * -+ * This function has to be called after dpni_get_attributes() -+ */ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf); -+ -+/** -+ * DPNI errors -+ */ -+ -+/** -+ * Extract out of frame header error -+ */ -+#define DPNI_ERROR_EOFHE 0x00020000 -+/** -+ * Frame length error -+ */ -+#define DPNI_ERROR_FLE 0x00002000 -+/** -+ * Frame physical error -+ */ -+#define DPNI_ERROR_FPE 0x00001000 -+/** -+ * Parsing header error -+ */ -+#define DPNI_ERROR_PHE 0x00000020 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L3CE 0x00000004 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L4CE 0x00000001 -+ -+/** -+ * enum dpni_error_action - Defines DPNI behavior for errors -+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame -+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow -+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue -+ */ -+enum dpni_error_action { -+ DPNI_ERROR_ACTION_DISCARD = 0, -+ DPNI_ERROR_ACTION_CONTINUE = 1, -+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 -+}; -+ -+/** -+ * struct dpni_error_cfg - Structure representing DPNI errors treatment -+ * @errors: Errors mask; use 'DPNI_ERROR__ -+ * @error_action: The desired action for the errors mask -+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation -+ * status (FAS); relevant only for the non-discard action -+ */ -+struct dpni_error_cfg { -+ uint32_t errors; -+ enum dpni_error_action error_action; -+ int set_frame_annotation; -+}; -+ -+/** -+ * dpni_set_errors_behavior() - Set errors behavior -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Errors configuration -+ * -+ * this function may be called numerous times with different -+ * error masks -+ * -+ * Return: '0' on Success; Error code otherwise. 
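As an illustration of how the error masks and actions above fit together, the sketch below routes checksum errors to the error queue and marks them in the frame annotation status; the helper name and the 0 cmd_flags value are assumptions.

#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Send frames with L3/L4 checksum errors to the error queue. */
static int dpni_steer_csum_errors(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_error_cfg cfg = {
                .errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
                .error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE,
                .set_frame_annotation = 1,
        };

        return dpni_set_errors_behavior(mc_io, 0 /* cmd_flags */, token, &cfg);
}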
-+ */ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg); -+ -+/** -+ * DPNI buffer layout modification options -+ */ -+ -+/** -+ * Select to modify the time-stamp setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 -+/** -+ * Select to modify the parser-result setting; not applicable for Tx -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 -+/** -+ * Select to modify the frame-status setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 -+/** -+ * Select to modify the private-data-size setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 -+/** -+ * Select to modify the data-alignment setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 -+/** -+ * Select to modify the data-head-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 -+/** -+ * Select to modify the data-tail-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 -+ -+/** -+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout -+ * @options: Flags representing the suggested modifications to the buffer -+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags -+ * @pass_timestamp: Pass timestamp value -+ * @pass_parser_result: Pass parser results -+ * @pass_frame_status: Pass frame status -+ * @private_data_size: Size kept for private data (in bytes) -+ * @data_align: Data alignment -+ * @data_head_room: Data head room -+ * @data_tail_room: Data tail room -+ */ -+struct dpni_buffer_layout { -+ uint32_t options; -+ int pass_timestamp; -+ int pass_parser_result; -+ int pass_frame_status; -+ uint16_t private_data_size; -+ uint16_t data_align; -+ uint16_t data_head_room; -+ uint16_t data_tail_room; -+}; -+ -+/** -+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. 
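The buffer-layout options above are usually combined into a single call; the sketch below is illustrative only, with arbitrary example values, an assumed opened-but-still-disabled DPNI token and 0 for cmd_flags.

#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Request parser results and frame status in the Rx buffer annotation and
 * reserve some headroom.  Allowed only while the DPNI is disabled. */
static int dpni_setup_rx_layout(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_buffer_layout layout = { 0 };

        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                         DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                         DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
        layout.pass_frame_status = 1;
        layout.pass_parser_result = 1;
        layout.data_head_room = 128;    /* example value only */

        return dpni_set_rx_buffer_layout(mc_io, 0 /* cmd_flags */, token,
                                         &layout);
}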
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout -+ * attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout -+ * configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
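A minimal, illustrative enable sequence for both checksum validators declared above; the helper name and the 0 cmd_flags value are assumptions.

#include "fsl_dpni.h"    /* assumed include path for the header above */

/* Enable hardware L3 and L4 checksum validation on an opened DPNI. */
static int dpni_enable_rx_csum(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        err = dpni_set_l3_chksum_validation(mc_io, 0 /* cmd_flags */, token, 1);
        if (err)
                return err;

        return dpni_set_l4_chksum_validation(mc_io, 0, token, 1);
}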
-+ */ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used -+ * for enqueue operations -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @qdid: Returned virtual QDID value that should be used as an argument -+ * in all enqueue operations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid); -+ -+/** -+ * struct dpni_sp_info - Structure representing DPNI storage-profile information -+ * (relevant only for DPNI owned by AIOP) -+ * @spids: array of storage-profiles -+ */ -+struct dpni_sp_info { -+ uint16_t spids[DPNI_MAX_SP]; -+}; -+ -+/** -+ * dpni_get_spids() - Get the AIOP storage profile IDs associated with the DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @sp_info: Returned AIOP storage-profile information -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only relevant for DPNI that belongs to AIOP container. -+ */ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info); -+ -+/** -+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @data_offset: Tx data offset (from start of buffer) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPNI_CNT_ING_FRAME: Counts ingress frames -+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes -+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit -+ * 'drop' setting -+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors -+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPNI_CNT_EGR_FRAME: Counts egress frames -+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes -+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors -+ */ -+enum dpni_counter { -+ DPNI_CNT_ING_FRAME = 0x0, -+ DPNI_CNT_ING_BYTE = 0x1, -+ DPNI_CNT_ING_FRAME_DROP = 0x2, -+ DPNI_CNT_ING_FRAME_DISCARD = 0x3, -+ DPNI_CNT_ING_MCAST_FRAME = 0x4, -+ DPNI_CNT_ING_MCAST_BYTE = 0x5, -+ DPNI_CNT_ING_BCAST_FRAME = 0x6, -+ DPNI_CNT_ING_BCAST_BYTES = 0x7, -+ DPNI_CNT_EGR_FRAME = 0x8, -+ DPNI_CNT_EGR_BYTE = 0x9, -+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * dpni_get_counter() - Read a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: Returned counter's current value -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value); -+ -+/** -+ * dpni_set_counter() - Set (or clear) a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: New counter value; typically pass '0' for resetting -+ * the counter. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct - Structure representing DPNI link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ */ -+struct dpni_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpni_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg); -+ -+/** -+ * struct dpni_link_state - Structure representing DPNI link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ * @up: Link state; '0' for down, '1' for up -+ */ -+struct dpni_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpni_get_link_state() - Return the link state (either up or down) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @state: Returned link state; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state); -+ -+/** -+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration -+ * @rate_limit: rate in Mbps -+ * @max_burst_size: burst size in bytes (up to 64KB) -+ */ -+struct dpni_tx_shaping_cfg { -+ uint32_t rate_limit; -+ uint16_t max_burst_size; -+}; -+ -+/** -+ * dpni_set_tx_shaping() - Set the transmit shaping -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tx_shaper: tx shaping configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper); -+ -+/** -+ * dpni_set_max_frame_length() - Set the maximum received frame length. 
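[Illustrative usage sketch] A sketch combining the link-configuration, link-state and counter calls above; same assumptions as the earlier sketch, and the rate value and option mask are illustrative only.

static int example_link_and_stats(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_link_cfg cfg = {
		.rate = 10000,		/* illustrative rate value */
		.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
	};
	struct dpni_link_state state = {0};
	uint64_t rx_frames = 0;
	int err;

	err = dpni_set_link_cfg(mc_io, 0, token, &cfg);
	if (err)
		return err;

	/* state.up reports '1' for link up, '0' for link down. */
	err = dpni_get_link_state(mc_io, 0, token, &state);
	if (err)
		return err;

	/* Read the ingress frame counter; dpni_set_counter(.., 0) would clear it. */
	return dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &rx_frames);
}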
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * dpni_get_max_frame_length() - Get the maximum received frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length); -+ -+/** -+ * dpni_set_mtu() - Set the MTU for the interface. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: MTU length (in bytes) -+ * -+ * MTU determines the maximum fragment size for performing IP -+ * fragmentation on egress packets. -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu); -+ -+/** -+ * dpni_get_mtu() - Get the MTU. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: Returned MTU length (in bytes) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu); -+ -+/** -+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
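[Illustrative usage sketch] The frame-length and MTU setters above interact: max_frame_length bounds what ingress accepts, while the MTU bounds the fragment size used for egress IP fragmentation. A sketch with illustrative values, under the same assumptions as above:

static int example_set_limits(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	/* Ingress frames longer than this are discarded. */
	err = dpni_set_max_frame_length(mc_io, 0, token, 9018);
	if (err)
		return err;

	/* Egress IP fragmentation (if enabled) fragments to at most this size. */
	err = dpni_set_mtu(mc_io, 0, token, 1500);
	if (err)
		return err;

	/* Accept all multicast traffic. */
	return dpni_set_multicast_promisc(mc_io, 0, token, 1);
}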
-+ */ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_primary_mac_addr() - Set the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to set as primary address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_get_primary_mac_addr() - Get the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: Returned MAC address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]); -+ -+/** -+ * dpni_add_mac_addr() - Add MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_remove_mac_addr() - Remove MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @unicast: Set to '1' to clear unicast addresses -+ * @multicast: Set to '1' to clear multicast addresses -+ * -+ * The primary MAC address is not cleared by this operation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast); -+ -+/** -+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
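[Illustrative usage sketch] A MAC-filtering sketch; the addresses are placeholders and the same assumptions apply. Note that dpni_clear_mac_filters() leaves the primary address in place.

static int example_mac_filters(struct fsl_mc_io *mc_io, uint16_t token)
{
	const uint8_t primary[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t extra[6]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	int err;

	err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
	if (err)
		return err;

	/* Add a secondary unicast filter entry. */
	err = dpni_add_mac_addr(mc_io, 0, token, extra);
	if (err)
		return err;

	/* Drop all unicast and multicast filter entries; the primary MAC is kept. */
	return dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
}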
-+ */ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_add_vlan_id() - Add VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_remove_vlan_id() - Remove VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_clear_vlan_filters() - Clear all VLAN filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode -+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority -+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling -+ */ -+enum dpni_tx_schedule_mode { -+ DPNI_TX_SCHED_STRICT_PRIORITY, -+ DPNI_TX_SCHED_WEIGHTED, -+}; -+ -+/** -+ * struct dpni_tx_schedule_cfg - Structure representing Tx -+ * scheduling configuration -+ * @mode: scheduling mode -+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; -+ * not applicable for 'strict-priority' mode; -+ */ -+struct dpni_tx_schedule_cfg { -+ enum dpni_tx_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpni_tx_selection_cfg - Structure representing transmission -+ * selection configuration -+ * @tc_sched: an array of traffic-classes -+ */ -+struct dpni_tx_selection_cfg { -+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; -+}; -+ -+/** -+ * dpni_set_tx_selection() - Set transmission selection configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: transmission selection configuration -+ * -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. 
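[Illustrative usage sketch] VLAN filtering follows the usual enable-then-populate pattern; a sketch with an illustrative VLAN ID, same assumptions as above:

static int example_vlan_filters(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	err = dpni_set_vlan_filters(mc_io, 0, token, 1);	/* enable filtering */
	if (err)
		return err;

	/* Accept only VLAN 100; dpni_remove_vlan_id() or
	 * dpni_clear_vlan_filters() undo this. */
	return dpni_add_vlan_id(mc_io, 0, token, 100);
}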
-+ */ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpni_dist_mode - DPNI distribution mode -+ * @DPNI_DIST_MODE_NONE: No distribution -+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if -+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation -+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if -+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation -+ */ -+enum dpni_dist_mode { -+ DPNI_DIST_MODE_NONE = 0, -+ DPNI_DIST_MODE_HASH = 1, -+ DPNI_DIST_MODE_FS = 2 -+}; -+ -+/** -+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action -+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame -+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id -+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash -+ */ -+enum dpni_fs_miss_action { -+ DPNI_FS_MISS_DROP = 0, -+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, -+ DPNI_FS_MISS_HASH = 2 -+}; -+ -+/** -+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration -+ * @miss_action: Miss action selection -+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' -+ */ -+struct dpni_fs_tbl_cfg { -+ enum dpni_fs_miss_action miss_action; -+ uint16_t default_flow_id; -+}; -+ -+/** -+ * dpni_prepare_key_cfg() - function prepare extract parameters -+ * @cfg: defining a full Key Generation profile (rule) -+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before the following functions: -+ * - dpni_set_rx_tc_dist() -+ * - dpni_set_qos_table() -+ */ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf); -+ -+/** -+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration -+ * @dist_size: Set the distribution size; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024 -+ * @dist_mode: Distribution mode -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * the extractions to be used for the distribution key by calling -+ * dpni_prepare_key_cfg() relevant only when -+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' -+ * @fs_cfg: Flow Steering table configuration; only relevant if -+ * 'dist_mode = DPNI_DIST_MODE_FS' -+ */ -+struct dpni_rx_tc_dist_cfg { -+ uint16_t dist_size; -+ enum dpni_dist_mode dist_mode; -+ uint64_t key_cfg_iova; -+ struct dpni_fs_tbl_cfg fs_cfg; -+}; -+ -+/** -+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class distribution configuration -+ * -+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() -+ * first to prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
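[Illustrative usage sketch] Hash distribution needs a key-generation profile serialized with dpni_prepare_key_cfg() into 256 bytes of zeroed, DMA-able memory. The sketch below treats the dpkg_profile_cfg contents and the virtual-to-IOVA mapping as caller-supplied, since neither is defined in this header; other assumptions are as in the earlier sketches.

static int example_rx_hash_dist(struct fsl_mc_io *mc_io, uint16_t token,
				const struct dpkg_profile_cfg *kg_cfg,
				uint8_t *key_buf /* 256B, zeroed, DMA-able */,
				uint64_t key_buf_iova)
{
	struct dpni_rx_tc_dist_cfg dist = {0};
	int err;

	/* Serialize the extraction rules into the 256-byte buffer. */
	err = dpni_prepare_key_cfg(kg_cfg, key_buf);
	if (err)
		return err;

	dist.dist_size = 8;			/* one of the supported sizes */
	dist.dist_mode = DPNI_DIST_MODE_HASH;
	dist.key_cfg_iova = key_buf_iova;	/* IOVA of key_buf after DMA mapping */

	return dpni_set_rx_tc_dist(mc_io, 0, token, 0 /* tc_id */, &dist);
}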
-+ */ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg); -+ -+/** -+ * Set to select color aware mode (otherwise - color blind) -+ */ -+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 -+/** -+ * Set to discard frame with RED color -+ */ -+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 -+ -+/** -+ * enum dpni_policer_mode - selecting the policer mode -+ * @DPNI_POLICER_MODE_NONE: Policer is disabled -+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through -+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 -+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 -+ */ -+enum dpni_policer_mode { -+ DPNI_POLICER_MODE_NONE = 0, -+ DPNI_POLICER_MODE_PASS_THROUGH, -+ DPNI_POLICER_MODE_RFC_2698, -+ DPNI_POLICER_MODE_RFC_4115 -+}; -+ -+/** -+ * enum dpni_policer_unit - DPNI policer units -+ * @DPNI_POLICER_UNIT_BYTES: bytes units -+ * @DPNI_POLICER_UNIT_FRAMES: frames units -+ */ -+enum dpni_policer_unit { -+ DPNI_POLICER_UNIT_BYTES = 0, -+ DPNI_POLICER_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_policer_color - selecting the policer color -+ * @DPNI_POLICER_COLOR_GREEN: Green color -+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color -+ * @DPNI_POLICER_COLOR_RED: Red color -+ */ -+enum dpni_policer_color { -+ DPNI_POLICER_COLOR_GREEN = 0, -+ DPNI_POLICER_COLOR_YELLOW, -+ DPNI_POLICER_COLOR_RED -+}; -+ -+/** -+ * struct dpni_rx_tc_policing_cfg - Policer configuration -+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values -+ * @mode: policer mode -+ * @default_color: For pass-through mode the policer re-colors with this -+ * color any incoming packets. For Color aware non-pass-through mode: -+ * policer re-colors with this color all packets with FD[DROPP]>2. -+ * @units: Bytes or Packets -+ * @cir: Committed information rate (CIR) in Kbps or packets/second -+ * @cbs: Committed burst size (CBS) in bytes or packets -+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second -+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second -+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets -+ * Excess burst size (EBS, rfc4115) in bytes or packets -+ */ -+struct dpni_rx_tc_policing_cfg { -+ uint32_t options; -+ enum dpni_policer_mode mode; -+ enum dpni_policer_unit units; -+ enum dpni_policer_color default_color; -+ uint32_t cir; -+ uint32_t cbs; -+ uint32_t eir; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
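[Illustrative usage sketch] A policing sketch for the RFC 2698 mode described above; rates and burst sizes are illustrative, other assumptions as before.

static int example_rx_policing(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_rx_tc_policing_cfg pol = {
		.options       = DPNI_POLICER_OPT_DISCARD_RED,
		.mode          = DPNI_POLICER_MODE_RFC_2698,
		.units         = DPNI_POLICER_UNIT_BYTES,
		.default_color = DPNI_POLICER_COLOR_GREEN,
		.cir = 100000,	/* committed rate, Kbps */
		.cbs = 65536,	/* committed burst, bytes */
		.eir = 200000,	/* peak rate (RFC 2698), Kbps */
		.ebs = 65536,	/* peak burst, bytes */
	};

	return dpni_set_rx_tc_policing(mc_io, 0, token, 0 /* tc_id */, &pol);
}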
-+ */ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * enum dpni_congestion_unit - DPNI congestion units -+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units -+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units -+ */ -+enum dpni_congestion_unit { -+ DPNI_CONGESTION_UNIT_BYTES = 0, -+ DPNI_CONGESTION_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_early_drop_mode - DPNI early drop mode -+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpni_early_drop_mode { -+ DPNI_EARLY_DROP_MODE_NONE = 0, -+ DPNI_EARLY_DROP_MODE_TAIL, -+ DPNI_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpni_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the max_threshold). -+ */ -+struct dpni_wred_cfg { -+ uint64_t max_threshold; -+ uint64_t min_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpni_early_drop_cfg - early-drop configuration -+ * @mode: drop mode -+ * @units: units type -+ * @green: WRED - 'green' configuration -+ * @yellow: WRED - 'yellow' configuration -+ * @red: WRED - 'red' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpni_early_drop_cfg { -+ enum dpni_early_drop_mode mode; -+ enum dpni_congestion_unit units; -+ -+ struct dpni_wred_cfg green; -+ struct dpni_wred_cfg yellow; -+ struct dpni_wred_cfg red; -+ -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpni_prepare_early_drop() - prepare an early drop. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_set_rx_tc_early_drop or -+ * dpni_set_tx_tc_early_drop -+ * -+ */ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpni_extract_early_drop() - extract the early drop configuration. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called after dpni_get_rx_tc_early_drop or -+ * dpni_get_tx_tc_early_drop -+ * -+ */ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf); -+ -+/** -+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
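[Illustrative usage sketch] Early drop uses the same prepare-into-a-DMA-buffer pattern: fill a dpni_early_drop_cfg, serialize it with dpni_prepare_early_drop(), then pass the buffer's IOVA to the set call. The WRED thresholds below are illustrative and the IOVA mapping is assumed to be handled by the caller.

static int example_rx_wred(struct fsl_mc_io *mc_io, uint16_t token,
			   uint8_t *ed_buf /* 256B, zeroed, DMA-able */,
			   uint64_t ed_buf_iova)
{
	struct dpni_early_drop_cfg ed = {0};

	ed.mode  = DPNI_EARLY_DROP_MODE_WRED;
	ed.units = DPNI_CONGESTION_UNIT_BYTES;
	ed.green.min_threshold    = 64 * 1024;
	ed.green.max_threshold    = 256 * 1024;
	ed.green.drop_probability = 50;	/* percent, at max_threshold */
	/* yellow/red configurations left at zero for brevity */

	/* Serialize into the buffer, then hand its IOVA to the MC. */
	dpni_prepare_early_drop(&ed, ed_buf);
	return dpni_set_rx_tc_early_drop(mc_io, 0, token, 0 /* tc_id */, ed_buf_iova);
}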
-+ */ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * enum dpni_dest - DPNI destination types -+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and -+ * does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpni_dest { -+ DPNI_DEST_NONE = 0, -+ DPNI_DEST_DPIO = 1, -+ DPNI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPNI_DEST_NONE' option -+ */ -+struct dpni_dest_cfg { -+ enum dpni_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPNI congestion options */ -+ -+/** -+ * CSCN message is written to message_iova once entering a -+ * congestion state (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 -+/** -+ * CSCN message is written to message_iova once exiting a -+ * congestion state (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 -+/** -+ * CSCN write will attempt to allocate into a cache (coherent write); -+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected -+ */ -+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once entering a congestion state -+ * (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once exiting a congestion state -+ * (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the -+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) -+ */ -+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 -+ -+/** -+ * struct dpni_congestion_notification_cfg - congestion notification -+ * configuration -+ * @units: units type -+ * @threshold_entry: above this threshold we enter a congestion state. -+ * set it to '0' to disable it -+ * @threshold_exit: below this threshold we exit the congestion state. 
-+ * @message_ctx: The context that will be part of the CSCN message -+ * @message_iova: I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is -+ * contained in 'options' -+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel -+ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values -+ */ -+ -+struct dpni_congestion_notification_cfg { -+ enum dpni_congestion_unit units; -+ uint32_t threshold_entry; -+ uint32_t threshold_exit; -+ uint64_t message_ctx; -+ uint64_t message_iova; -+ struct dpni_dest_cfg dest_cfg; -+ uint16_t options; -+}; -+ -+/** -+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
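[Illustrative usage sketch] A congestion-notification sketch that only writes CSCN messages to memory (no WQ-channel notification); message_iova must reference 16-byte-aligned DMA-able memory, the thresholds are illustrative, and the other assumptions are as before.

static int example_rx_congestion(struct fsl_mc_io *mc_io, uint16_t token,
				 uint64_t cscn_iova /* 16B-aligned, DMA-able */)
{
	struct dpni_congestion_notification_cfg cn = {0};

	cn.units           = DPNI_CONGESTION_UNIT_FRAMES;
	cn.threshold_entry = 1024;	/* enter congestion above this */
	cn.threshold_exit  = 512;	/* leave congestion below this */
	cn.message_iova    = cscn_iova;
	cn.dest_cfg.dest_type = DPNI_DEST_NONE;	/* no DPIO/DPCON notification */
	cn.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
		     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

	return dpni_set_rx_tc_congestion_notification(mc_io, 0, token,
						      0 /* tc_id */, &cn);
}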
-+ */ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * enum dpni_flc_type - DPNI FLC types -+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value -+ * @DPNI_FLC_STASH: select the FLC to be used for stash control -+ */ -+enum dpni_flc_type { -+ DPNI_FLC_USER_DEFINED = 0, -+ DPNI_FLC_STASH = 1, -+}; -+ -+/** -+ * enum dpni_stash_size - DPNI FLC stashing size -+ * @DPNI_STASH_SIZE_0B: no stash -+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes -+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes -+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes -+ */ -+enum dpni_stash_size { -+ DPNI_STASH_SIZE_0B = 0, -+ DPNI_STASH_SIZE_64B = 1, -+ DPNI_STASH_SIZE_128B = 2, -+ DPNI_STASH_SIZE_192B = 3, -+}; -+ -+/* DPNI FLC stash options */ -+ -+/** -+ * stashes the whole annotation area (up to 192 bytes) -+ */ -+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 -+ -+/** -+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration -+ * @flc_type: FLC type -+ * @options: Mask of available options; -+ * use 'DPNI_FLC_STASH_' values -+ * @frame_data_size: Size of frame data to be stashed -+ * @flow_context_size: Size of flow context to be stashed -+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': -+ * this value will be provided in the frame descriptor -+ * (FD[FLC]) -+ * 2. In case flc_type is 'DPNI_FLC_STASH': -+ * this value will be I/O virtual address of the -+ * flow-context; -+ * Must be cacheline-aligned and DMA-able memory -+ */ -+struct dpni_flc_cfg { -+ enum dpni_flc_type flc_type; -+ uint32_t options; -+ enum dpni_stash_size frame_data_size; -+ enum dpni_stash_size flow_context_size; -+ uint64_t flow_context; -+}; -+ -+/** -+ * DPNI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPNI_QUEUE_OPT_DEST 0x00000002 -+/** Select to modify the flow-context parameters; -+ * not applicable for Tx-conf/Err queues as the FD comes from the user -+ */ -+#define DPNI_QUEUE_OPT_FLC 0x00000004 -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 -+/* Select to modify the queue's tail-drop threshold */ -+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 -+ -+/** -+ * struct dpni_queue_cfg - Structure representing queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPNI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' -+ * is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' -+ * @flc_cfg: Flow context configuration; in case the TC's distribution -+ * is either NONE or HASH the FLC's settings of flow#0 are used. -+ * in the case of FS (flow-steering) the flow's FLC settings -+ * are used. 
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' -+ * @order_preservation_en: enable/disable order preservation; -+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained -+ * in 'options' -+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; -+ * '0' value disable the threshold; maximum value is 0xE000000; -+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained -+ * in 'options' -+ */ -+struct dpni_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * struct dpni_queue_attr - Structure representing queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @flc_cfg: Flow context configuration -+ * @order_preservation_en: enable/disable order preservation -+ * @tail_drop_threshold: queue's tail drop threshold in bytes; -+ * @fqid: Virtual fqid value to be used for dequeue operations -+ */ -+struct dpni_queue_attr { -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+ -+ uint32_t fqid; -+}; -+ -+/** -+ * DPNI Tx flow modification options -+ */ -+ -+/** -+ * Select to modify the settings for dedicate Tx confirmation/error -+ */ -+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 -+/** -+ * Select to modify the L3 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 -+/** -+ * Select to modify the L4 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 -+ -+/** -+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration -+ * @options: Flags representing the suggested modifications to the Tx flow; -+ * Use any combination 'DPNI_TX_FLOW_OPT_' flags -+ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx -+ * confirmation and error queue; Set to '0' to use the private -+ * Tx confirmation and error queue; valid only if -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation -+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' -+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' -+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' -+ */ -+struct dpni_tx_flow_cfg { -+ uint32_t options; -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_set_tx_flow() - Set Tx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: Provides (or returns) the sender's flow ID; -+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate -+ * a new flow_id; this ID should be used as the QDBIN argument -+ * in enqueue operations -+ * @cfg: Tx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg); -+ -+/** -+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes -+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and -+ * error queue; '0' if using private Tx confirmation and error queue -+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled -+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled -+ */ -+struct dpni_tx_flow_attr { -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_get_tx_flow() - Get Tx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function -+ * @attr: Returned Tx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr); -+ -+/** -+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration -+ * @errors_only: Set to '1' to report back only error frames; -+ * Set to '0' to confirm transmission/error for all transmitted frames; -+ * @queue_cfg: Queue configuration -+ */ -+struct dpni_tx_conf_cfg { -+ int errors_only; -+ struct dpni_queue_cfg queue_cfg; -+}; -+ -+/** -+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: Queue configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported -+ * back - successfully transmitted frames are not confirmed. Otherwise, all -+ * transmitted frames are sent for confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
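[Illustrative usage sketch] Allocating a Tx flow returns the flow ID that is later used as the QDBIN argument in enqueue operations. The sketch assumes DPNI_NEW_FLOW_ID is the constant referenced in the doc comment above (its definition sits earlier in this header, outside this hunk); other assumptions are unchanged.

static int example_tx_flow(struct fsl_mc_io *mc_io, uint16_t token,
			   uint16_t *flow_id)
{
	struct dpni_tx_flow_cfg cfg = {0};

	cfg.options       = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
			    DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
	cfg.l3_chksum_gen = 1;
	cfg.l4_chksum_gen = 1;

	/* Request a fresh flow; on return *flow_id is the QDBIN to enqueue with. */
	*flow_id = DPNI_NEW_FLOW_ID;
	return dpni_set_tx_flow(mc_io, 0, token, flow_id, &cfg);
}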
-+ */ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg); -+ -+/** -+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes -+ * @errors_only: '1' if only error frames are reported back; '0' if all -+ * transmitted frames are confirmed -+ * @queue_attr: Queue attributes -+ */ -+struct dpni_tx_conf_attr { -+ int errors_only; -+ struct dpni_queue_attr queue_attr; -+}; -+ -+/** -+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @attr: Returned tx-conf attributes -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr); -+ -+/** -+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @revoke: revoke or not -+ * -+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not -+ * selected at DPNI creation. -+ * Calling this function with 'revoke' set to '1' disables all transmit -+ * confirmation (including the private confirmation queues), regardless of -+ * previous settings; Note that in this case, Tx error frames are still -+ * enqueued to the general transmit errors queue. -+ * Calling this function with 'revoke' set to '0' restores the previous -+ * settings for both general and private transmit confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke); -+ -+/** -+ * dpni_set_rx_flow() - Set Rx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7); -+ * use 'DPNI_ALL_TCS' to set all TCs and all flows -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPNI_ALL_TC_FLOWS' to set all flows within -+ * this tc_id; ignored if tc_id is set to -+ * 'DPNI_ALL_TCS'; -+ * @cfg: Rx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_flow() - Get Rx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @flow_id: Rx flow id within the traffic class -+ * @attr: Returned Rx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * dpni_set_rx_err_queue() - Set Rx error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_err_queue() - Get Rx error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Returned Queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
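[Illustrative usage sketch] Steering an Rx flow to a DPIO notification channel combines the dpni_dest_cfg and dpni_queue_cfg structures from earlier in this header; the DPIO id, priority and user context below are illustrative, other assumptions as before.

static int example_rx_flow_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
				   int dpio_id)
{
	struct dpni_queue_cfg q = {0};

	q.options  = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
	q.user_ctx = 0xdeadbeef;		/* echoed in each dequeued FD */
	q.dest_cfg.dest_type = DPNI_DEST_DPIO;	/* FQDAN notifications to a DPIO */
	q.dest_cfg.dest_id   = dpio_id;
	q.dest_cfg.priority  = 0;

	/* Apply to flow 0 of traffic class 0. */
	return dpni_set_rx_flow(mc_io, 0, token, 0 /* tc_id */, 0 /* flow_id */, &q);
}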
-+ */ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * key extractions to be used as the QoS criteria by calling -+ * dpni_prepare_key_cfg() -+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); -+ * '0' to use the 'default_tc' in such cases -+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 -+ */ -+struct dpni_qos_tbl_cfg { -+ uint64_t key_cfg_iova; -+ int discard_on_miss; -+ uint8_t default_tc; -+}; -+ -+/** -+ * dpni_set_qos_table() - Set QoS mapping table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS table configuration -+ * -+ * This function and all QoS-related functions require that -+ *'max_tcs > 1' was set at DPNI creation. -+ * -+ * warning: Before calling this function, call dpni_prepare_key_cfg() to -+ * prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg); -+ -+/** -+ * struct dpni_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpni_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to add -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id); -+ -+/** -+ * dpni_remove_qos_entry() - Remove QoS mapping entry -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_qos_table() - Clear all QoS mapping entries -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Following this function call, all frames are directed to -+ * the default traffic class (0) -+ * -+ * Return: '0' on Success; Error code otherwise. 
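[Illustrative usage sketch] QoS classification reuses the key-profile machinery: serialize the key, install the table, then add per-rule entries. As before, the dpkg profile contents, the rule key/mask buffers and the IOVA mappings are assumed to be prepared by the caller.

static int example_qos_table(struct fsl_mc_io *mc_io, uint16_t token,
			     const struct dpkg_profile_cfg *kg_cfg,
			     uint8_t *key_buf /* 256B, zeroed, DMA-able */,
			     uint64_t key_buf_iova,
			     const struct dpni_rule_cfg *rule)
{
	struct dpni_qos_tbl_cfg tbl = {0};
	int err;

	err = dpni_prepare_key_cfg(kg_cfg, key_buf);
	if (err)
		return err;

	tbl.key_cfg_iova    = key_buf_iova;
	tbl.discard_on_miss = 0;	/* unmatched frames go to default_tc */
	tbl.default_tc      = 0;

	err = dpni_set_qos_table(mc_io, 0, token, &tbl);
	if (err)
		return err;

	/* Map frames matching 'rule' to traffic class 1. */
	return dpni_add_qos_entry(mc_io, 0, token, rule, 1 /* tc_id */);
}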
-+ */ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class -+ * (to select a flow ID) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to add -+ * @flow_id: Flow id selection (must be smaller than the -+ * distribution size of the traffic class) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id); -+ -+/** -+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id); -+ -+/** -+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
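[Illustrative usage sketch] Flow-steering entries are added per traffic class with the same dpni_rule_cfg; the chosen flow_id must be smaller than that class's distribution size. Assumptions as in the earlier sketches.

static int example_fs_entry(struct fsl_mc_io *mc_io, uint16_t token,
			    const struct dpni_rule_cfg *rule)
{
	/* Steer matching frames of traffic class 0 to flow 2. */
	return dpni_add_fs_entry(mc_io, 0, token, 0 /* tc_id */, rule,
				 2 /* flow_id */);
}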
-+ */ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI -+ * creation. Fragmentation is performed according to MTU value -+ * set by dpni_set_mtu() function -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+#endif /* __FSL_DPNI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h -new file mode 100644 -index 0000000..c0f8af0 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h -@@ -0,0 +1,1058 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPNI_CMD_H -+#define _FSL_DPNI_CMD_H -+ -+/* DPNI Version */ -+#define DPNI_VER_MAJOR 6 -+#define DPNI_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPNI_CMDID_OPEN 0x801 -+#define DPNI_CMDID_CLOSE 0x800 -+#define DPNI_CMDID_CREATE 0x901 -+#define DPNI_CMDID_DESTROY 0x900 -+ -+#define DPNI_CMDID_ENABLE 0x002 -+#define DPNI_CMDID_DISABLE 0x003 -+#define DPNI_CMDID_GET_ATTR 0x004 -+#define DPNI_CMDID_RESET 0x005 -+#define DPNI_CMDID_IS_ENABLED 0x006 -+ -+#define DPNI_CMDID_SET_IRQ 0x010 -+#define DPNI_CMDID_GET_IRQ 0x011 -+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPNI_CMDID_SET_IRQ_MASK 0x014 -+#define DPNI_CMDID_GET_IRQ_MASK 0x015 -+#define DPNI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPNI_CMDID_SET_POOLS 0x200 -+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 -+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 -+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 -+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 -+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 -+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 -+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 -+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 -+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 -+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A -+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B -+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C -+ -+#define DPNI_CMDID_GET_QDID 0x210 -+#define DPNI_CMDID_GET_SP_INFO 0x211 -+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 -+#define DPNI_CMDID_GET_COUNTER 0x213 -+#define DPNI_CMDID_SET_COUNTER 0x214 -+#define DPNI_CMDID_GET_LINK_STATE 0x215 -+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 -+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 -+#define DPNI_CMDID_SET_MTU 0x218 -+#define DPNI_CMDID_GET_MTU 0x219 -+#define DPNI_CMDID_SET_LINK_CFG 0x21A -+#define DPNI_CMDID_SET_TX_SHAPING 0x21B -+ -+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 -+#define DPNI_CMDID_GET_MCAST_PROMISC 0x221 -+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 -+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 -+#define DPNI_CMDID_SET_PRIM_MAC 0x224 -+#define DPNI_CMDID_GET_PRIM_MAC 0x225 -+#define DPNI_CMDID_ADD_MAC_ADDR 0x226 -+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 -+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 -+ -+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 -+#define DPNI_CMDID_ADD_VLAN_ID 0x231 -+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 -+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 -+ -+#define DPNI_CMDID_SET_RX_TC_DIST 0x235 -+#define DPNI_CMDID_SET_TX_FLOW 0x236 -+#define DPNI_CMDID_GET_TX_FLOW 0x237 -+#define DPNI_CMDID_SET_RX_FLOW 0x238 -+#define DPNI_CMDID_GET_RX_FLOW 0x239 -+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A -+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B -+ -+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E -+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F -+ -+#define DPNI_CMDID_SET_QOS_TBL 0x240 -+#define DPNI_CMDID_ADD_QOS_ENT 0x241 -+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 -+#define DPNI_CMDID_CLR_QOS_TBL 0x243 -+#define DPNI_CMDID_ADD_FS_ENT 0x244 -+#define DPNI_CMDID_REMOVE_FS_ENT 0x245 -+#define DPNI_CMDID_CLR_FS_ENT 0x246 -+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 -+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 -+#define DPNI_CMDID_SET_IPR 0x249 -+#define DPNI_CMDID_SET_IPF 0x24A -+ -+#define DPNI_CMDID_SET_TX_SELECTION 0x250 -+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 -+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 -+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 -+#define 
DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 -+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 -+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 -+#define DPNI_CMDID_SET_TX_CONF 0x257 -+#define DPNI_CMDID_GET_TX_CONF 0x258 -+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 -+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A -+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B -+#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_OPEN(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ 
-+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ -+ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ -+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ -+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ -+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ -+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ -+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ -+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ -+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ 
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_ATTR(cmd, attr) \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ -+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ -+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ -+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ -+ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \ -+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ 
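Every DPNI_CMD_/DPNI_RSP_ macro in this file follows the legend in the comment line above: each argument is packed into (or read back from) a 64-bit command parameter word at a (param, offset, width) position. MC_CMD_OP and MC_RSP_OP themselves come from the MC command header, which is not part of this hunk; the helper below only illustrates the packing those triplets are assumed to describe:

#include <stdint.h>

/* Illustration only: place 'val' into bits [offset, offset+width) of the
 * 64-bit word params[param].  DPNI_CMD_OPEN(cmd, dpni_id), for example,
 * corresponds to pack_field(cmd.params, 0, 0, 32, dpni_id). */
static inline void pack_field(uint64_t *params, int param,
                              int offset, int width, uint64_t val)
{
        uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        params[param] &= ~(mask << offset);
        params[param] |= (val & mask) << offset;
}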
-+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ -+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, 
uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_QDID(cmd, qdid) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_COUNTER(cmd, counter) \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_COUNTER(cmd, value) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MTU(cmd, mtu) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MTU(cmd, mtu) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, 
param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ -+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[0].mode); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[1].mode); \ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[2].mode); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 
48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[3].mode); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[4].mode); \ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[5].mode); \ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[6].mode); \ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[7].mode); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \ -+ cfg->fs_cfg.miss_action); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+} while (0) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ -+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, 
cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPR(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPF(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, 
uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, 
cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ cfg->queue_cfg.dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ -+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ -+ cfg->queue_cfg.tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ cfg->queue_cfg.flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.frame_data_size); \ -+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.flow_context_size); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ -+ cfg->queue_cfg.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ -+ attr->queue_attr.dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ attr->queue_attr.dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ -+ MC_RSP_OP(cmd, 0, 46, 1, int, \ -+ attr->queue_attr.order_preservation_en); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ -+ 
MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ -+ attr->queue_attr.tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ -+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ attr->queue_attr.flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.frame_data_size); \ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.flow_context_size); \ -+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ -+ attr->queue_attr.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#endif /* _FSL_DPNI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprc.h b/drivers/net/dpaa2/mc/fsl_dprc.h -new file mode 100644 -index 0000000..c831f46 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprc.h -@@ -0,0 +1,1032 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPRC_H -+#define _FSL_DPRC_H -+ -+/* Data Path Resource Container API -+ * Contains DPRC API for managing and querying DPAA resources -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Set this value as the icid value in dprc_cfg structure when creating a -+ * container, in case the ICID is not selected by the user and should be -+ * allocated by the DPRC from the pool of ICIDs. -+ */ -+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) -+ -+/** -+ * Set this value as the portal_id value in dprc_cfg structure when creating a -+ * container, in case the portal ID is not specifically selected by the -+ * user and should be allocated by the DPRC from the pool of portal ids. -+ */ -+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) -+ -+/** -+ * dprc_get_container_id() - Get container ID associated with a given portal. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Requested container ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id); -+ -+/** -+ * dprc_open() - Open DPRC object for use -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Container ID to open -+ * @token: Returned token of DPRC object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Required before any operation on the object. -+ */ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token); -+ -+/** -+ * dprc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Container general options -+ * -+ * These options may be selected at container creation by the container creator -+ * and can be retrieved using dprc_get_attributes() -+ */ -+ -+/** -+ * Spawn Policy Option allowed - Indicates that the new container is allowed -+ * to spawn and have its own child containers. 
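dprc_get_container_id(), dprc_open() and dprc_close() above form the usual bracket around any work on a container. A minimal sketch, with cmd_flags left at 0 (no MC_CMD_FLAG_ options) and error handling reduced to early returns:

#include <stdint.h>
#include "fsl_dprc.h"    /* header added by this patch */

/* Sketch: resolve the container behind this MC portal, open it, run a
 * caller-supplied operation with the token, then close the session. */
static int with_own_dprc(struct fsl_mc_io *mc_io,
                         int (*op)(struct fsl_mc_io *, uint16_t))
{
        int container_id, err;
        uint16_t token;

        err = dprc_get_container_id(mc_io, 0, &container_id);
        if (err)
                return err;

        err = dprc_open(mc_io, 0, container_id, &token);
        if (err)
                return err;

        err = op(mc_io, token);

        dprc_close(mc_io, 0, token);
        return err;
}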
-+ */ -+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 -+ -+/** -+ * General Container allocation policy - Indicates that the new container is -+ * allowed to allocate requested resources from its parent container; if not -+ * set, the container is only allowed to use resources in its own pools; Note -+ * that this is a container's global policy, but the parent container may -+ * override it and set specific quota per resource type. -+ */ -+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 -+ -+/** -+ * Object initialization allowed - software context associated with this -+ * container is allowed to invoke object initialization operations. -+ */ -+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 -+ -+/** -+ * Topology change allowed - software context associated with this -+ * container is allowed to invoke topology operations, such as attach/detach -+ * of network objects. -+ */ -+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 -+ -+/** -+ * AIOP - Indicates that container belongs to AIOP. -+ */ -+#define DPRC_CFG_OPT_AIOP 0x00000020 -+ -+/** -+ * IRQ Config - Indicates that the container allowed to configure its IRQs. -+ */ -+#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 -+ -+/** -+ * struct dprc_cfg - Container configuration options -+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free -+ * ICID value is allocated by the DPRC -+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free -+ * portal ID is allocated by the DPRC -+ * @options: Combination of 'DPRC_CFG_OPT_' options -+ * @label: Object's label -+ */ -+struct dprc_cfg { -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ char label[16]; -+}; -+ -+/** -+ * dprc_create_container() - Create child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @cfg: Child container configuration -+ * @child_container_id: Returned child container ID -+ * @child_portal_offset: Returned child portal offset from MC portal base -+ * -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset); -+ -+/** -+ * dprc_destroy_container() - Destroy child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to destroy -+ * -+ * This function terminates the child container, so following this call the -+ * child container ID becomes invalid. -+ * -+ * Notes: -+ * - All resources and objects of the destroyed container are returned to the -+ * parent container or destroyed if were created be the destroyed container. -+ * - This function destroy all the child containers of the specified -+ * container prior to destroying the container itself. -+ * -+ * warning: Only the parent container is allowed to destroy a child policy -+ * Container 0 can't be destroyed -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * dprc_reset_container - Reset child container. 
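A sketch of child-container creation using the pool-allocated ICID and portal id described above; the option bits and struct dprc_cfg fields are the ones defined in this header, and the label string is only an example:

#include <stdint.h>
#include <string.h>
#include "fsl_dprc.h"

/* Sketch: create a child container, letting the DPRC pick the ICID and
 * portal id from its pools, and allow it to allocate from the parent and
 * to create objects. */
static int create_child(struct fsl_mc_io *mc_io, uint16_t parent_token,
                        int *child_id, uint64_t *child_portal_offset)
{
        struct dprc_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.icid = DPRC_GET_ICID_FROM_POOL;
        cfg.portal_id = DPRC_GET_PORTAL_ID_FROM_POOL;
        cfg.options = DPRC_CFG_OPT_ALLOC_ALLOWED |
                      DPRC_CFG_OPT_OBJ_CREATE_ALLOWED;
        strncpy(cfg.label, "child-dprc", sizeof(cfg.label) - 1);

        return dprc_create_container(mc_io, 0, parent_token, &cfg,
                                     child_id, child_portal_offset);
}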
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to reset -+ * -+ * In case a software context crashes or becomes non-responsive, the parent -+ * may wish to reset its resources container before the software context is -+ * restarted. -+ * -+ * This routine informs all objects assigned to the child container that the -+ * container is being reset, so they may perform any cleanup operations that are -+ * needed. All objects handles that were owned by the child container shall be -+ * closed. -+ * -+ * Note that such request may be submitted even if the child software context -+ * has not crashed, but the resulting object cleanup operations will not be -+ * aware of that. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * DPRC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPRC_IRQ_INDEX 0 -+ -+/** -+ * Number of dprc's IRQs -+ */ -+#define DPRC_NUM_OF_IRQS 1 -+ -+/* DPRC IRQ events */ -+/** -+ * IRQ event - Indicates that a new object added to the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 -+/** -+ * IRQ event - Indicates that an object was removed from the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 -+/** -+ * IRQ event - Indicates that resources added to the container -+ */ -+#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 -+/** -+ * IRQ event - Indicates that resources removed from the container -+ */ -+#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 -+/** -+ * IRQ event - Indicates that one of the descendant containers that opened by -+ * this container is destroyed -+ */ -+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 -+/** -+ * IRQ event - Indicates that on one of the container's opened object is -+ * destroyed -+ */ -+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 -+/** -+ * Irq event - Indicates that object is created at the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 -+ -+/** -+ * struct dprc_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprc_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_irq() - Get IRQ information from the DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprc_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprc_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
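A sketch combining the IRQ mask and enable calls above: request notification only for object add/remove events, then enable the single DPRC interrupt line. The constants are the DPRC_IRQ_* definitions earlier in this header:

#include <stdint.h>
#include "fsl_dprc.h"

static int arm_dprc_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        /* Unmask only the object-added / object-removed causes. */
        err = dprc_set_irq_mask(mc_io, 0, token, DPRC_IRQ_INDEX,
                                DPRC_IRQ_EVENT_OBJ_ADDED |
                                DPRC_IRQ_EVENT_OBJ_REMOVED);
        if (err)
                return err;

        /* Then turn on the overall interrupt state for that index. */
        return dprc_set_irq_enable(mc_io, 0, token, DPRC_IRQ_INDEX, 1);
}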
-+ */ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprc_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprc_attributes - Container attributes -+ * @container_id: Container's ID -+ * @icid: Container's ICID -+ * @portal_id: Container's portal ID -+ * @options: Container's options as set at container's creation -+ * @version: DPRC version -+ */ -+struct dprc_attributes { -+ int container_id; -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ /** -+ * struct version - DPRC version -+ * @major: DPRC major version -+ * @minor: DPRC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprc_get_attributes() - Obtains container attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @attributes: Returned container attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attributes); -+ -+/** -+ * dprc_set_res_quota() - Set allocation policy for a specific resource/object -+ * type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: Resource/object type -+ * @quota: Sets the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from its parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. -+ * -+ * Allocation policy determines whether or not a container may allocate -+ * resources from its parent. Each container has a 'global' allocation policy -+ * that is set when the container is created. -+ * -+ * This function sets allocation policy for a specific resource type. -+ * The default policy for all resource types matches the container's 'global' -+ * allocation policy. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only the parent container is allowed to change a child policy. 
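-+ *
-+ * A minimal usage sketch (illustrative only; the "dpbp" pool type, the
-+ * child container ID and the quota of 4 are example values, cmd_flags is 0):
-+ *
-+ *	char res_type[] = "dpbp";
-+ *	int child_id = 2;
-+ *
-+ *	dprc_set_res_quota(mc_io, 0, token, child_id, res_type, 4);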
-+ */
-+int dprc_set_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t quota);
-+
-+/**
-+ * dprc_get_res_quota() - Gets the allocation policy of a specific
-+ * resource/object type in a child container
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the child container
-+ * @type: resource/object type
-+ * @quota: Returns the maximum number of resources of the selected type
-+ * that the child container is allowed to allocate from the parent;
-+ * when quota is set to -1, the policy is the same as the container's
-+ * general policy.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t *quota);
-+
-+/* Resource request options */
-+
-+/**
-+ * Explicit resource ID request - The requested objects/resources
-+ * are explicit and sequential (in case of resources).
-+ * The base ID is given in the id_base_align field of res_req
-+ */
-+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
-+
-+/**
-+ * Aligned resources request - Relevant only for resources
-+ * request (and not objects). Indicates that the resources' base ID should be
-+ * sequential and aligned to the value given in the id_base_align field of
-+ * dprc_res_req
-+ */
-+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
-+
-+/**
-+ * Plugged Flag - Relevant only for object assignment requests.
-+ * Indicates that after all objects are assigned, an interrupt will be
-+ * invoked at the relevant GPP. The assigned objects will be marked as
-+ * plugged; plugged objects can't be assigned from their container
-+ */
-+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
-+
-+/**
-+ * struct dprc_res_req - Resource request descriptor, to be used in assignment
-+ * or un-assignment of resources and objects.
-+ * @type: Resource/object type: Represented as a NULL terminated string.
-+ * This string may be retrieved by using dprc_get_pool() to get the resource
-+ * type and dprc_get_obj() to get the object type;
-+ * Note: it is not possible to assign/un-assign DPRC objects
-+ * @num: Number of resources
-+ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
-+ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
-+ * is set in options), this field represents the required base ID
-+ * for resource allocation; In case of aligned assignment
-+ * (DPRC_RES_REQ_OPT_ALIGNED is set in options), this field
-+ * indicates the required alignment for the resource ID(s) -
-+ * use 0 if there are no alignment or explicit ID requirements
-+ */
-+struct dprc_res_req {
-+ char type[16];
-+ uint32_t num;
-+ uint32_t options;
-+ int id_base_align;
-+};
-+
-+/**
-+ * dprc_assign() - Assigns objects or resources to a child container.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @container_id: ID of the child container
-+ * @res_req: Describes the type and amount of resources to
-+ * assign to the given container
-+ *
-+ * Assignment is usually done by a parent (this DPRC) to one of its child
-+ * containers.
-+ *
-+ * According to the DPRC allocation policy, the assigned resources may be taken
-+ * (allocated) from the container's ancestors, if not enough resources are
-+ * available in the container itself.
-+ * -+ * The type of assignment depends on the dprc_res_req options, as follows: -+ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have -+ * the explicit base ID specified at the id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be -+ * aligned to the value given at id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, -+ * and indicates that the object must be set to the plugged state. -+ * -+ * A container may use this function with its own ID in order to change a -+ * object state to plugged or unplugged. -+ * -+ * If IRQ information has been set in the child DPRC, it will signal an -+ * interrupt following every change in its object assignment. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_unassign() - Un-assigns objects or resources from a child container -+ * and moves them into this (parent) DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to un-assign from -+ * the child container -+ * -+ * Un-assignment of objects can succeed only if the object is not in the -+ * plugged or opened state. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_get_pool_count() - Get the number of dprc's pools -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_count: Returned number of resource pools in the dprc -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count); -+ -+/** -+ * dprc_get_pool() - Get the type (string) of a certain dprc's pool -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_index: Index of the pool to be queried (< pool_count) -+ * @type: The type of the pool -+ * -+ * The pool types retrieved one by one by incrementing -+ * pool_index up to (not including) the value of pool_count returned -+ * from dprc_get_pool_count(). dprc_get_pool_count() must -+ * be called prior to dprc_get_pool(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type); -+ -+/** -+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_count: Number of objects assigned to the DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. 
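-+ *
-+ * A minimal usage sketch (illustrative only; cmd_flags of 0, no error
-+ * handling): the returned count bounds the dprc_get_obj() iteration:
-+ *
-+ *	int i, obj_count = 0;
-+ *	struct dprc_obj_desc obj_desc;
-+ *
-+ *	dprc_get_obj_count(mc_io, 0, token, &obj_count);
-+ *	for (i = 0; i < obj_count; i++)
-+ *		if (dprc_get_obj(mc_io, 0, token, i, &obj_desc) == 0)
-+ *			printf("%s.%d\n", obj_desc.type, obj_desc.id);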
-+ */ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count); -+ -+/** -+ * Objects Attributes Flags -+ */ -+ -+/** -+ * Opened state - Indicates that an object is open by at least one owner -+ */ -+#define DPRC_OBJ_STATE_OPEN 0x00000001 -+/** -+ * Plugged state - Indicates that the object is plugged -+ */ -+#define DPRC_OBJ_STATE_PLUGGED 0x00000002 -+ -+/** -+ * Shareability flag - Object flag indicating no memory shareability. -+ * the object generates memory accesses that are non coherent with other -+ * masters; -+ * user is responsible for proper memory handling through IOMMU configuration. -+ */ -+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 -+ -+/** -+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() -+ * @type: Type of object: NULL terminated string -+ * @id: ID of logical object resource -+ * @vendor: Object vendor identifier -+ * @ver_major: Major version number -+ * @ver_minor: Minor version number -+ * @irq_count: Number of interrupts supported by the object -+ * @region_count: Number of mappable regions supported by the object -+ * @state: Object state: combination of DPRC_OBJ_STATE_ states -+ * @label: Object label -+ * @flags: Object's flags -+ */ -+struct dprc_obj_desc { -+ char type[16]; -+ int id; -+ uint16_t vendor; -+ uint16_t ver_major; -+ uint16_t ver_minor; -+ uint8_t irq_count; -+ uint8_t region_count; -+ uint32_t state; -+ char label[16]; -+ uint16_t flags; -+}; -+ -+/** -+ * dprc_get_obj() - Get general information on an object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_index: Index of the object to be queried (< obj_count) -+ * @obj_desc: Returns the requested object descriptor -+ * -+ * The object descriptors are retrieved one by one by incrementing -+ * obj_index up to (not including) the value of obj_count returned -+ * from dprc_get_obj_count(). dprc_get_obj_count() must -+ * be called prior to dprc_get_obj(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_get_obj_desc() - Get object descriptor. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: The type of the object to get its descriptor. -+ * @obj_id: The id of the object to get its descriptor -+ * @obj_desc: The returned descriptor to fill and return to the user -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type of the object to set its IRQ -+ * @obj_id: ID of the object to set its IRQ -+ * @irq_index: The interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_obj_irq() - Get IRQ information from object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type od the object to get its IRQ -+ * @obj_id: ID of the object to get its IRQ -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: The returned IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_res_count() - Obtains the number of free resources that are -+ * assigned to this container, by pool type -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @res_count: Returned number of free resources of the given -+ * resource type that are assigned to this DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count); -+ -+/** -+ * enum dprc_iter_status - Iteration status -+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration -+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed -+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration -+ */ -+enum dprc_iter_status { -+ DPRC_ITER_STATUS_FIRST = 0, -+ DPRC_ITER_STATUS_MORE = 1, -+ DPRC_ITER_STATUS_LAST = 2 -+}; -+ -+/** -+ * struct dprc_res_ids_range_desc - Resource ID range descriptor -+ * @base_id: Base resource ID of this range -+ * @last_id: Last resource ID of this range -+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at -+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, -+ * additional iterations are needed, until the returned marker is -+ * DPRC_ITER_STATUS_LAST -+ */ -+struct dprc_res_ids_range_desc { -+ int base_id; -+ int last_id; -+ enum dprc_iter_status iter_status; -+}; -+ -+/** -+ * dprc_get_res_ids() - Obtains IDs of free resources in the container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @range_desc: range descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc); -+ -+/** -+ * Region flags -+ */ -+/** -+ * Cacheable - Indicates that region should be mapped as cacheable -+ */ -+#define DPRC_REGION_CACHEABLE 0x00000001 -+ -+/** -+ * enum dprc_region_type - Region type -+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region -+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region -+ */ -+enum dprc_region_type { -+ DPRC_REGION_TYPE_MC_PORTAL, -+ DPRC_REGION_TYPE_QBMAN_PORTAL -+}; -+ -+/** -+ * struct dprc_region_desc - Mappable region descriptor -+ * @base_offset: Region offset from region's base address. 
-+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals -+ * base address; For DPIO, region base is offset from SoC QMan portals -+ * base address -+ * @size: Region size (in bytes) -+ * @flags: Region attributes -+ * @type: Portal region type -+ */ -+struct dprc_region_desc { -+ uint32_t base_offset; -+ uint32_t size; -+ uint32_t flags; -+ enum dprc_region_type type; -+}; -+ -+/** -+ * dprc_get_obj_region() - Get region information for a specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object type as returned in dprc_get_obj() -+ * @obj_id: Unique object instance as returned in dprc_get_obj() -+ * @region_index: The specific region to query -+ * @region_desc: Returns the requested region descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc); -+ -+/** -+ * dprc_set_obj_label() - Set object label. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object's type -+ * @obj_id: Object's ID -+ * @label: The required label. The maximum length is 16 chars. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label); -+ -+/** -+ * struct dprc_endpoint - Endpoint description for link connect/disconnect -+ * operations -+ * @type: Endpoint object type: NULL terminated string -+ * @id: Endpoint object ID -+ * @if_id: Interface ID; should be set for endpoints with multiple -+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 -+ */ -+struct dprc_endpoint { -+ char type[16]; -+ int id; -+ uint16_t if_id; -+}; -+ -+/** -+ * struct dprc_connection_cfg - Connection configuration. -+ * Used for virtual connections only -+ * @committed_rate: Committed rate (Mbits/s) -+ * @max_rate: Maximum rate (Mbits/s) -+ */ -+struct dprc_connection_cfg { -+ uint32_t committed_rate; -+ uint32_t max_rate; -+}; -+ -+/** -+ * dprc_connect() - Connect two endpoints to create a network link between them -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint1: Endpoint 1 configuration parameters -+ * @endpoint2: Endpoint 2 configuration parameters -+ * @cfg: Connection configuration. The connection configuration is ignored for -+ * connections made to DPMAC objects, where rate is retrieved from the -+ * MAC configuration. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg); -+ -+/** -+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint: Endpoint configuration parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
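-+ *
-+ * A minimal usage sketch (illustrative only; the "dpni" type and the
-+ * zero IDs are example values, cmd_flags is 0): tear down a link that
-+ * was previously created with dprc_connect() by naming either endpoint:
-+ *
-+ *	struct dprc_endpoint ep = {
-+ *		.type = "dpni",
-+ *		.id = 0,
-+ *		.if_id = 0,
-+ *	};
-+ *
-+ *	dprc_disconnect(mc_io, 0, token, &ep);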
-+ */ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint); -+ -+/** -+* dprc_get_connection() - Get connected endpoint and link status if connection -+* exists. -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPRC object -+* @endpoint1: Endpoint 1 configuration parameters -+* @endpoint2: Returned endpoint 2 configuration parameters -+* @state: Returned link state: -+* 1 - link is up; -+* 0 - link is down; -+* -1 - no connection (endpoint2 information is irrelevant) -+* -+* Return: '0' on Success; -ENAVAIL if connection does not exist. -+*/ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state); -+ -+#endif /* _FSL_DPRC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h -new file mode 100644 -index 0000000..469e286 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h -@@ -0,0 +1,755 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPRC_CMD_H -+#define _FSL_DPRC_CMD_H -+ -+/* DPRC Version */ -+#define DPRC_VER_MAJOR 5 -+#define DPRC_VER_MINOR 1 -+ -+/* Command IDs */ -+#define DPRC_CMDID_CLOSE 0x800 -+#define DPRC_CMDID_OPEN 0x805 -+#define DPRC_CMDID_CREATE 0x905 -+ -+#define DPRC_CMDID_GET_ATTR 0x004 -+#define DPRC_CMDID_RESET_CONT 0x005 -+ -+#define DPRC_CMDID_SET_IRQ 0x010 -+#define DPRC_CMDID_GET_IRQ 0x011 -+#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRC_CMDID_CREATE_CONT 0x151 -+#define DPRC_CMDID_DESTROY_CONT 0x152 -+#define DPRC_CMDID_GET_CONT_ID 0x830 -+#define DPRC_CMDID_SET_RES_QUOTA 0x155 -+#define DPRC_CMDID_GET_RES_QUOTA 0x156 -+#define DPRC_CMDID_ASSIGN 0x157 -+#define DPRC_CMDID_UNASSIGN 0x158 -+#define DPRC_CMDID_GET_OBJ_COUNT 0x159 -+#define DPRC_CMDID_GET_OBJ 0x15A -+#define DPRC_CMDID_GET_RES_COUNT 0x15B -+#define DPRC_CMDID_GET_RES_IDS 0x15C -+#define DPRC_CMDID_GET_OBJ_REG 0x15E -+#define DPRC_CMDID_SET_OBJ_IRQ 0x15F -+#define DPRC_CMDID_GET_OBJ_IRQ 0x160 -+#define DPRC_CMDID_SET_OBJ_LABEL 0x161 -+#define DPRC_CMDID_GET_OBJ_DESC 0x162 -+ -+#define DPRC_CMDID_CONNECT 0x167 -+#define DPRC_CMDID_DISCONNECT 0x168 -+#define DPRC_CMDID_GET_POOL 0x169 -+#define DPRC_CMDID_GET_POOL_COUNT 0x16A -+ -+#define DPRC_CMDID_GET_CONNECTION 0x16C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_CONTAINER_ID(cmd, container_id) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_OPEN(cmd, container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CREATE_CONTAINER(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->icid); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->portal_id); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, cfg->label[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, cfg->label[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, cfg->label[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, cfg->label[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, cfg->label[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, cfg->label[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, cfg->label[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, cfg->label[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, cfg->label[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, cfg->label[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, cfg->label[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, cfg->label[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, cfg->label[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, cfg->label[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, cfg->label[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, cfg->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_CREATE_CONTAINER(cmd, child_container_id, child_portal_offset)\ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, int, child_container_id); \ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, child_portal_offset);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_RESET_CONTAINER(cmd, child_container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->container_id); \ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->icid); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\ -+ MC_RSP_OP(cmd, 1, 32, 32, int, attr->portal_id); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, quota);\ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 
2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_QUOTA(cmd, quota) \ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, quota) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_CMD_ASSIGN(cmd, container_id, res_req) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\ -+} while (0) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\ -+ 
MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_POOL_COUNT(cmd, pool_count) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, pool_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_POOL(cmd, pool_index) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, pool_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_POOL(cmd, type) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_RSP_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_RSP_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_RSP_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_RSP_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_RSP_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_RSP_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_RSP_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_RSP_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_RSP_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_RSP_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_RSP_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_RSP_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_RSP_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_RSP_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_RSP_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_COUNT(cmd, obj_count) \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ(cmd, obj_index) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ(cmd, obj_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \ -+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \ -+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\ -+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\ -+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\ -+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\ -+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\ -+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\ -+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\ -+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\ -+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\ -+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\ -+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\ -+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\ -+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\ -+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\ -+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\ -+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\ -+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\ -+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\ -+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\ -+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\ -+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\ -+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\ -+ MC_RSP_OP(cmd, 5, 48, 8, char, 
obj_desc->label[6]);\ -+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\ -+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\ -+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\ -+ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\ -+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\ -+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\ -+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\ -+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\ -+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id);\ -+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \ -+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \ -+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\ -+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\ -+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\ -+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\ -+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\ -+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\ -+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\ -+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\ -+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\ -+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\ -+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\ -+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\ -+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\ -+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\ -+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\ -+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\ -+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\ -+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\ -+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\ -+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\ -+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\ -+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\ -+ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\ -+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\ -+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\ -+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\ -+ MC_RSP_OP(cmd, 6, 16, 
8, char, obj_desc->label[10]);\ -+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\ -+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\ -+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\ -+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\ -+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_COUNT(cmd, type) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_COUNT(cmd, res_count) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, res_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_IDS(cmd, range_desc, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 42, 7, enum dprc_iter_status, \ -+ range_desc->iter_status); \ -+ MC_CMD_OP(cmd, 1, 0, 32, int, range_desc->base_id); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, range_desc->last_id);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_IDS(cmd, range_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 42, 7, enum dprc_iter_status, \ -+ range_desc->iter_status);\ -+ MC_RSP_OP(cmd, 1, 0, 32, int, range_desc->base_id); \ -+ MC_RSP_OP(cmd, 1, 32, 32, int, range_desc->last_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, region_index);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, 
obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_REGION(cmd, region_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, region_desc->base_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, region_desc->size); \ -+ MC_RSP_OP(cmd, 2, 32, 4, enum dprc_region_type, region_desc->type);\ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, region_desc->flags);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, label[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, label[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, label[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, label[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, label[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, label[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, label[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, label[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, label[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, label[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, label[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, label[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, label[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, label[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, label[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, label[15]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, int, endpoint2->id); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, endpoint2->if_id); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[0]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[1]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[2]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[3]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[4]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[5]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[6]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[7]); \ -+ MC_CMD_OP(cmd, 3, 0, 8, char, endpoint1->type[8]); \ -+ MC_CMD_OP(cmd, 3, 8, 8, char, endpoint1->type[9]); \ -+ MC_CMD_OP(cmd, 3, 16, 8, char, endpoint1->type[10]); \ -+ MC_CMD_OP(cmd, 3, 24, 8, char, endpoint1->type[11]); \ -+ MC_CMD_OP(cmd, 3, 32, 8, char, endpoint1->type[12]); \ -+ MC_CMD_OP(cmd, 3, 40, 8, char, endpoint1->type[13]); \ -+ MC_CMD_OP(cmd, 3, 48, 8, char, endpoint1->type[14]); \ -+ MC_CMD_OP(cmd, 3, 56, 8, char, endpoint1->type[15]); \ -+ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, cfg->max_rate); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->committed_rate); \ -+ MC_CMD_OP(cmd, 5, 0, 8, char, endpoint2->type[0]); \ -+ MC_CMD_OP(cmd, 5, 8, 8, char, endpoint2->type[1]); \ -+ MC_CMD_OP(cmd, 5, 16, 8, char, endpoint2->type[2]); \ -+ MC_CMD_OP(cmd, 5, 24, 8, char, endpoint2->type[3]); \ -+ MC_CMD_OP(cmd, 5, 32, 8, char, endpoint2->type[4]); \ -+ MC_CMD_OP(cmd, 5, 40, 8, char, endpoint2->type[5]); \ -+ MC_CMD_OP(cmd, 5, 48, 8, char, endpoint2->type[6]); \ -+ MC_CMD_OP(cmd, 5, 56, 8, char, endpoint2->type[7]); \ -+ MC_CMD_OP(cmd, 6, 0, 8, char, endpoint2->type[8]); \ -+ MC_CMD_OP(cmd, 6, 8, 8, char, endpoint2->type[9]); \ -+ MC_CMD_OP(cmd, 6, 16, 8, char, endpoint2->type[10]); \ -+ MC_CMD_OP(cmd, 6, 24, 8, char, endpoint2->type[11]); \ -+ MC_CMD_OP(cmd, 6, 32, 8, char, endpoint2->type[12]); \ -+ MC_CMD_OP(cmd, 6, 40, 8, char, endpoint2->type[13]); \ -+ MC_CMD_OP(cmd, 6, 48, 8, char, endpoint2->type[14]); \ -+ MC_CMD_OP(cmd, 6, 
56, 8, char, endpoint2->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_DISCONNECT(cmd, endpoint) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint->type[0]); \ -+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint->type[1]); \ -+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint->type[2]); \ -+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint->type[3]); \ -+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint->type[4]); \ -+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint->type[5]); \ -+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint->type[6]); \ -+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint->type[7]); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint->type[8]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint->type[9]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint->type[10]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint->type[11]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint->type[12]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint->type[13]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint->type[14]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_CONNECTION(cmd, endpoint1) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint1->type[0]); \ -+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint1->type[1]); \ -+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint1->type[2]); \ -+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint1->type[3]); \ -+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint1->type[4]); \ -+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint1->type[5]); \ -+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint1->type[6]); \ -+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint1->type[7]); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[8]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[9]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[10]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[11]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[12]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[13]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[14]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_CONNECTION(cmd, endpoint2, state) \ -+do { \ -+ MC_RSP_OP(cmd, 3, 0, 32, int, endpoint2->id); \ -+ MC_RSP_OP(cmd, 3, 32, 16, uint16_t, endpoint2->if_id); \ -+ MC_RSP_OP(cmd, 4, 0, 8, char, endpoint2->type[0]); \ -+ MC_RSP_OP(cmd, 4, 8, 8, char, endpoint2->type[1]); \ -+ MC_RSP_OP(cmd, 4, 16, 8, char, endpoint2->type[2]); \ -+ MC_RSP_OP(cmd, 4, 24, 8, char, endpoint2->type[3]); \ -+ MC_RSP_OP(cmd, 4, 32, 8, char, endpoint2->type[4]); \ -+ MC_RSP_OP(cmd, 4, 40, 8, char, endpoint2->type[5]); \ -+ MC_RSP_OP(cmd, 4, 48, 8, char, endpoint2->type[6]); \ -+ MC_RSP_OP(cmd, 4, 56, 8, char, endpoint2->type[7]); \ -+ MC_RSP_OP(cmd, 5, 0, 8, char, endpoint2->type[8]); \ -+ MC_RSP_OP(cmd, 5, 8, 8, char, endpoint2->type[9]); \ -+ MC_RSP_OP(cmd, 5, 16, 8, char, endpoint2->type[10]); \ -+ MC_RSP_OP(cmd, 5, 24, 8, char, endpoint2->type[11]); \ -+ MC_RSP_OP(cmd, 5, 32, 8, char, endpoint2->type[12]); \ -+ MC_RSP_OP(cmd, 5, 40, 8, char, endpoint2->type[13]); \ -+ MC_RSP_OP(cmd, 5, 48, 8, char, endpoint2->type[14]); \ -+ MC_RSP_OP(cmd, 5, 56, 8, char, endpoint2->type[15]); \ -+ MC_RSP_OP(cmd, 6, 0, 32, int, state); \ -+} while (0) -+ -+#endif /* 
_FSL_DPRC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h -new file mode 100644 -index 0000000..2eb6edc ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprtc.h -@@ -0,0 +1,434 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPRTC_H -+#define __FSL_DPRTC_H -+ -+/* Data Path Real Time Counter API -+ * Contains initialization APIs and runtime control APIs for RTC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Number of irq's -+ */ -+#define DPRTC_MAX_IRQ_NUM 1 -+#define DPRTC_IRQ_INDEX 0 -+ -+/** -+ * Interrupt event masks: -+ */ -+ -+/** -+ * Interrupt event mask indicating alarm event had occured -+ */ -+#define DPRTC_EVENT_ALARM 0x40000000 -+/** -+ * Interrupt event mask indicating periodic pulse event had occured -+ */ -+#define DPRTC_EVENT_PPS 0x08000000 -+ -+/** -+ * dprtc_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dprtc_id: DPRTC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dprtc_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
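-+ *
-+ * A minimal usage sketch (illustrative only; cmd_flags of 0 and
-+ * dprtc_id 0 are example values, error handling omitted): open a
-+ * session, read the counter, then close the session:
-+ *
-+ *	uint16_t rtc_token;
-+ *	uint64_t now;
-+ *
-+ *	dprtc_open(mc_io, 0, 0, &rtc_token);
-+ *	dprtc_get_time(mc_io, 0, rtc_token, &now);
-+ *	dprtc_close(mc_io, 0, rtc_token);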
-+ */ -+int dprtc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dprtc_id, -+ uint16_t *token); -+ -+/** -+ * dprtc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dprtc_cfg - Structure representing DPRTC configuration -+ * @options: place holder -+ */ -+struct dprtc_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dprtc_create() - Create the DPRTC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPRTC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dprtc_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dprtc_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dprtc_destroy() - Destroy the DPRTC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dprtc_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dprtc_set_clock_offset() - Sets the clock's offset -+ * (usually relative to another clock). -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @offset: New clock offset (in nanoseconds). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int64_t offset); -+ -+/** -+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @freq_compensation: -+ * The new frequency compensation value to set. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t freq_compensation); -+ -+/** -+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @freq_compensation: -+ * Frequency compensation value -+ * -+ * Return: '0' on Success; Error code otherwise. 
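The DPRTC calls above follow the usual MC open/adjust/close pattern. A minimal usage sketch, assuming an already initialized struct fsl_mc_io portal, a known object id, and 0 standing in for the MC_CMD_FLAG_ command flags; rtc_trim and its parameters are hypothetical names, not part of the patch:

#include <stdint.h>
#include "fsl_dprtc.h"

/* Open the DPRTC, update its frequency compensation if needed, close it. */
int rtc_trim(struct fsl_mc_io *mc_io, int dprtc_id, uint32_t new_comp)
{
    uint16_t token;
    uint32_t cur_comp;
    int err;

    err = dprtc_open(mc_io, 0, dprtc_id, &token);   /* 0: no MC_CMD_FLAG_ set */
    if (err)
        return err;

    err = dprtc_get_freq_compensation(mc_io, 0, token, &cur_comp);
    if (!err && cur_comp != new_comp)
        err = dprtc_set_freq_compensation(mc_io, 0, token, new_comp);

    dprtc_close(mc_io, 0, token);                   /* always release the session */
    return err;
}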
-+ */ -+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *freq_compensation); -+ -+/** -+ * dprtc_get_time() - Returns the current RTC time. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: Current RTC time. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time); -+ -+/** -+ * dprtc_set_time() - Updates current RTC time. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: New RTC time. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time); -+ -+/** -+ * dprtc_set_alarm() - Defines and sets alarm. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: In nanoseconds, the time when the alarm -+ * should go off - must be a multiple of -+ * 1 microsecond -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_alarm(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time); -+ -+/** -+ * struct dprtc_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprtc_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprtc_set_irq() - Set IRQ information for the DPRTC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprtc_irq_cfg *irq_cfg); -+ -+/** -+ * dprtc_get_irq() - Get IRQ information from the DPRTC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprtc_irq_cfg *irq_cfg); -+ -+/** -+ * dprtc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. 
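dprtc_get_time() and dprtc_set_alarm() work on nanosecond values, with the alarm constrained to a multiple of 1 microsecond per the comment above. A sketch that arms an alarm one second ahead, under the same portal/flag assumptions as before (hypothetical helper name):

#include <stdint.h>
#include "fsl_dprtc.h"

int rtc_alarm_in_1s(struct fsl_mc_io *mc_io, uint16_t token)
{
    uint64_t now, alarm;
    int err;

    err = dprtc_get_time(mc_io, 0, token, &now);    /* current RTC time, ns */
    if (err)
        return err;

    alarm = now + 1000000000ULL;                    /* one second ahead, in ns */
    alarm -= alarm % 1000ULL;                       /* round down to a 1 us multiple */

    return dprtc_set_alarm(mc_io, 0, token, alarm);
}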
-+ */ -+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprtc_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprtc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprtc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprtc_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprtc_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
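The IRQ calls above are meant to be used together: program the message-interrupt target, unmask only the causes of interest, then enable the index. A hypothetical bring-up/handler pair, assuming the MSI address/data come from the host interrupt setup (which is outside this header) and using 0 for the command flags:

#include <stdint.h>
#include "fsl_dprtc.h"

int rtc_irq_setup(struct fsl_mc_io *mc_io, uint16_t token,
                  uint64_t msi_addr, uint32_t msi_data)
{
    struct dprtc_irq_cfg cfg = {
        .addr = msi_addr,       /* address the MC writes to raise the IRQ */
        .val = msi_data,        /* value written to that address */
        .irq_num = 0,           /* user-defined cookie */
    };
    int err;

    err = dprtc_set_irq(mc_io, 0, token, DPRTC_IRQ_INDEX, &cfg);
    if (err)
        return err;
    /* only the alarm cause is of interest here */
    err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX, DPRTC_EVENT_ALARM);
    if (err)
        return err;
    return dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);
}

/* In the handler: read the pending causes and clear the one we handled. */
int rtc_irq_handle(struct fsl_mc_io *mc_io, uint16_t token)
{
    uint32_t status = 0;
    int err = dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status);

    if (!err && (status & DPRTC_EVENT_ALARM))
        err = dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX,
                                     DPRTC_EVENT_ALARM);
    return err;
}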
-+ */ -+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprtc_attr - Structure representing DPRTC attributes -+ * @id: DPRTC object ID -+ * @version: DPRTC version -+ */ -+struct dprtc_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPRTC version -+ * @major: DPRTC major version -+ * @minor: DPRTC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprtc_get_attributes - Retrieve DPRTC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprtc_attr *attr); -+ -+#endif /* __FSL_DPRTC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h -new file mode 100644 -index 0000000..aeccece ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h -@@ -0,0 +1,181 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
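The command header that follows (like the other *_cmd.h files in this patch) describes every field as a (parameter word, bit offset, bit width) triplet fed to MC_CMD_OP/MC_RSP_OP. As a stand-alone illustration of that addressing scheme only — not the real macro implementation, which lives in the MC command header outside this hunk — a small packing helper:

#include <stdint.h>
#include <stdio.h>

/* Write 'width' bits of 'val' at bit 'offset' of a 64-bit parameter word. */
static void put_field(uint64_t *param, unsigned int offset,
                      unsigned int width, uint64_t val)
{
    uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

    *param &= ~(mask << offset);            /* clear the field */
    *param |= (val & mask) << offset;       /* write the new value */
}

int main(void)
{
    /* e.g. DPRTC_CMD_SET_IRQ places irq_index at (param 0, offset 0, width 8)
     * and irq_cfg->val at (param 0, offset 32, width 32). */
    uint64_t param0 = 0;

    put_field(&param0, 0, 8, 0);                /* irq_index = 0 */
    put_field(&param0, 32, 32, 0xdeadbeef);     /* irq_cfg->val */
    printf("param0 = 0x%016llx\n", (unsigned long long)param0);
    return 0;
}

Reading the macro tables with this model in mind makes the response macros symmetric: MC_RSP_OP extracts the same (param, offset, width) slots instead of filling them.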
-+ */ -+#ifndef _FSL_DPRTC_CMD_H -+#define _FSL_DPRTC_CMD_H -+ -+/* DPRTC Version */ -+#define DPRTC_VER_MAJOR 1 -+#define DPRTC_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPRTC_CMDID_CLOSE 0x800 -+#define DPRTC_CMDID_OPEN 0x810 -+#define DPRTC_CMDID_CREATE 0x910 -+#define DPRTC_CMDID_DESTROY 0x900 -+ -+#define DPRTC_CMDID_ENABLE 0x002 -+#define DPRTC_CMDID_DISABLE 0x003 -+#define DPRTC_CMDID_GET_ATTR 0x004 -+#define DPRTC_CMDID_RESET 0x005 -+#define DPRTC_CMDID_IS_ENABLED 0x006 -+ -+#define DPRTC_CMDID_SET_IRQ 0x010 -+#define DPRTC_CMDID_GET_IRQ 0x011 -+#define DPRTC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRTC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRTC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRTC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRTC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRTC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRTC_CMDID_SET_CLOCK_OFFSET 0x1d0 -+#define DPRTC_CMDID_SET_FREQ_COMPENSATION 0x1d1 -+#define DPRTC_CMDID_GET_FREQ_COMPENSATION 0x1d2 -+#define DPRTC_CMDID_GET_TIME 0x1d3 -+#define DPRTC_CMDID_SET_TIME 0x1d4 -+#define DPRTC_CMDID_SET_ALARM 0x1d5 -+#define DPRTC_CMDID_SET_PERIODIC_PULSE 0x1d6 -+#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE 0x1d7 -+#define DPRTC_CMDID_SET_EXT_TRIGGER 0x1d8 -+#define DPRTC_CMDID_CLEAR_EXT_TRIGGER 0x1d9 -+#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP 0x1dA -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_OPEN(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_STATUS(cmd, 
irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset) \ -+ MC_CMD_OP(cmd, 0, 0, 64, int64_t, offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation) \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, freq_compensation) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, freq_compensation) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, freq_compensation) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_TIME(cmd, time) \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_TIME(cmd, time) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_ALARM(cmd, time) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+#endif /* _FSL_DPRTC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpseci.h b/drivers/net/dpaa2/mc/fsl_dpseci.h -new file mode 100644 -index 0000000..1dd7215 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpseci.h -@@ -0,0 +1,647 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPSECI_H -+#define __FSL_DPSECI_H -+ -+/* Data Path SEC Interface API -+ * Contains initialization APIs and runtime control APIs for DPSECI -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * General DPSECI macros -+ */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPSECI object -+ */ -+#define DPSECI_PRIO_NUM 8 -+ -+/** -+ * All queues considered; see dpseci_set_rx_queue() -+ */ -+#define DPSECI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpseci_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpseci_id: DPSECI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpseci_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpseci_id, -+ uint16_t *token); -+ -+/** -+ * dpseci_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpseci_cfg - Structure representing DPSECI configuration -+ * @num_tx_queues: num of queues towards the SEC -+ * @num_rx_queues: num of queues back from the SEC -+ * @priorities: Priorities for the SEC hardware processing; -+ * each place in the array is the priority of the tx queue -+ * towards the SEC, -+ * valid priorities are configured with values 1-8; -+ */ -+struct dpseci_cfg { -+ uint8_t num_tx_queues; -+ uint8_t num_rx_queues; -+ uint8_t priorities[DPSECI_PRIO_NUM]; -+}; -+ -+/** -+ * dpseci_create() - Create the DPSECI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPSECI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
For objects that are created using the -+ * DPL file, call dpseci_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpseci_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpseci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_is_enabled() - Check if the DPSECI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpseci_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpseci_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
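dpseci_create() plus dpseci_enable() give the basic bring-up flow for a SEC interface. A sketch under the same portal and command-flag assumptions as the earlier examples; two queue pairs at SEC priorities 1 and 2 are an arbitrary choice:

#include <stdint.h>
#include "fsl_dpseci.h"

int seci_bring_up(struct fsl_mc_io *mc_io, uint16_t *token)
{
    struct dpseci_cfg cfg = {
        .num_tx_queues = 2,
        .num_rx_queues = 2,
        .priorities = { 1, 2 },     /* valid priorities are 1-8 */
    };
    int en = 0;
    int err;

    err = dpseci_create(mc_io, 0, &cfg, token);
    if (err)
        return err;

    err = dpseci_enable(mc_io, 0, *token);
    if (!err)
        err = dpseci_is_enabled(mc_io, 0, *token, &en);
    return err ? err : (en ? 0 : -1);       /* sanity-check the enable took effect */
}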
-+ */ -+int dpseci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpseci_irq_cfg *irq_cfg); -+ -+/** -+ * dpseci_get_irq() - Get IRQ information from the DPSECI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpseci_irq_cfg *irq_cfg); -+ -+/** -+ * dpseci_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpseci_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpseci_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpseci_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpseci_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpseci_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpseci_attr - Structure representing DPSECI attributes -+ * @id: DPSECI object ID -+ * @version: DPSECI version -+ * @num_tx_queues: number of queues towards the SEC -+ * @num_rx_queues: number of queues back from the SEC -+ */ -+struct dpseci_attr { -+ int id; -+ /** -+ * struct version - DPSECI version -+ * @major: DPSECI major version -+ * @minor: DPSECI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_tx_queues; -+ uint8_t num_rx_queues; -+}; -+ -+/** -+ * dpseci_get_attributes() - Retrieve DPSECI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpseci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_attr *attr); -+ -+/** -+ * enum dpseci_dest - DPSECI destination types -+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpseci_dest { -+ DPSECI_DEST_NONE = 0, -+ DPSECI_DEST_DPIO = 1, -+ DPSECI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPSECI_DEST_NONE' option -+ */ -+struct dpseci_dest_cfg { -+ enum dpseci_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** -+ * DPSECI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPSECI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004 -+ -+/** -+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPSECI_QUEUE_OPT_' flags -+ * @order_preservation_en: order preservation configuration for the rx queue -+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options' -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpseci_rx_queue_cfg { -+ uint32_t options; -+ int order_preservation_en; -+ uint64_t user_ctx; -+ struct dpseci_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpseci_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation; use -+ * DPSECI_ALL_QUEUES to configure all Rx queues identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
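A queue-setup sketch tying the pieces above together: read the attributes to confirm Rx queues exist, then park all of them with a caller-supplied user context via DPSECI_ALL_QUEUES. The helper name and the parked (polled) destination are illustrative choices, not mandated by the API:

#include <stdint.h>
#include "fsl_dpseci.h"

int seci_park_rx_queues(struct fsl_mc_io *mc_io, uint16_t token,
                        uint64_t user_ctx)
{
    struct dpseci_attr attr;
    struct dpseci_rx_queue_cfg cfg = {
        .options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
        .user_ctx = user_ctx,               /* returned in each dequeued FD */
        .dest_cfg = {
            .dest_type = DPSECI_DEST_NONE,  /* parked mode: caller polls */
            .dest_id = 0,
            .priority = 0,
        },
    };
    int err;

    /* how many Rx queues were actually created */
    err = dpseci_get_attributes(mc_io, 0, token, &attr);
    if (err || attr.num_rx_queues == 0)
        return err ? err : -1;

    /* one call configures every Rx queue identically */
    return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}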
-+ */ -+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ const struct dpseci_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @order_preservation_en: Status of the order preservation configuration -+ * on the queue -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpseci_rx_queue_attr { -+ uint64_t user_ctx; -+ int order_preservation_en; -+ struct dpseci_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_rx_queue_attr *attr); -+ -+/** -+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware -+ * @priority: SEC hardware processing priority for the queue -+ */ -+struct dpseci_tx_queue_attr { -+ uint32_t fqid; -+ uint8_t priority; -+}; -+ -+/** -+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_tx_queue_attr *attr); -+ -+/** -+ * struct dpseci_sec_attr - Structure representing attributes of the SEC -+ * hardware accelerator -+ * @ip_id: ID for SEC. -+ * @major_rev: Major revision number for SEC. -+ * @minor_rev: Minor revision number for SEC. -+ * @era: SEC Era. -+ * @deco_num: The number of copies of the DECO that are implemented in -+ * this version of SEC. -+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented -+ * in this version of SEC. -+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented -+ * in this version of SEC. -+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are -+ * implemented in this version of SEC. -+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are -+ * implemented in this version of SEC. -+ * @crc_acc_num: The number of copies of the CRC module that are implemented -+ * in this version of SEC. -+ * @pk_acc_num: The number of copies of the Public Key module that are -+ * implemented in this version of SEC. -+ * @kasumi_acc_num: The number of copies of the Kasumi module that are -+ * implemented in this version of SEC. -+ * @rng_acc_num: The number of copies of the Random Number Generator that are -+ * implemented in this version of SEC. -+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are -+ * implemented in this version of SEC. 
-+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented -+ * in this version of SEC. -+ * @des_acc_num: The number of copies of the DES module that are implemented -+ * in this version of SEC. -+ * @aes_acc_num: The number of copies of the AES module that are implemented -+ * in this version of SEC. -+ **/ -+ -+struct dpseci_sec_attr { -+ uint16_t ip_id; -+ uint8_t major_rev; -+ uint8_t minor_rev; -+ uint8_t era; -+ uint8_t deco_num; -+ uint8_t zuc_auth_acc_num; -+ uint8_t zuc_enc_acc_num; -+ uint8_t snow_f8_acc_num; -+ uint8_t snow_f9_acc_num; -+ uint8_t crc_acc_num; -+ uint8_t pk_acc_num; -+ uint8_t kasumi_acc_num; -+ uint8_t rng_acc_num; -+ uint8_t md_acc_num; -+ uint8_t arc4_acc_num; -+ uint8_t des_acc_num; -+ uint8_t aes_acc_num; -+}; -+ -+/** -+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @attr: Returned SEC attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_attr *attr); -+ -+/** -+ * struct dpseci_sec_counters - Structure representing global SEC counters and -+ * not per dpseci counters -+ * @dequeued_requests: Number of Requests Dequeued -+ * @ob_enc_requests: Number of Outbound Encrypt Requests -+ * @ib_dec_requests: Number of Inbound Decrypt Requests -+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted -+ * @ob_prot_bytes: Number of Outbound Bytes Protected -+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted -+ * @ib_valid_bytes: Number of Inbound Bytes Validated -+ */ -+struct dpseci_sec_counters { -+ uint64_t dequeued_requests; -+ uint64_t ob_enc_requests; -+ uint64_t ib_dec_requests; -+ uint64_t ob_enc_bytes; -+ uint64_t ob_prot_bytes; -+ uint64_t ib_dec_bytes; -+ uint64_t ib_valid_bytes; -+}; -+ -+/** -+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @counters: Returned SEC counters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_counters *counters); -+ -+#endif /* __FSL_DPSECI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h -new file mode 100644 -index 0000000..6c0b96e ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h -@@ -0,0 +1,241 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPSECI_CMD_H -+#define _FSL_DPSECI_CMD_H -+ -+/* DPSECI Version */ -+#define DPSECI_VER_MAJOR 3 -+#define DPSECI_VER_MINOR 1 -+ -+/* Command IDs */ -+#define DPSECI_CMDID_CLOSE 0x800 -+#define DPSECI_CMDID_OPEN 0x809 -+#define DPSECI_CMDID_CREATE 0x909 -+#define DPSECI_CMDID_DESTROY 0x900 -+ -+#define DPSECI_CMDID_ENABLE 0x002 -+#define DPSECI_CMDID_DISABLE 0x003 -+#define DPSECI_CMDID_GET_ATTR 0x004 -+#define DPSECI_CMDID_RESET 0x005 -+#define DPSECI_CMDID_IS_ENABLED 0x006 -+ -+#define DPSECI_CMDID_SET_IRQ 0x010 -+#define DPSECI_CMDID_GET_IRQ 0x011 -+#define DPSECI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPSECI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPSECI_CMDID_SET_IRQ_MASK 0x014 -+#define DPSECI_CMDID_GET_IRQ_MASK 0x015 -+#define DPSECI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPSECI_CMDID_SET_RX_QUEUE 0x194 -+#define DPSECI_CMDID_GET_RX_QUEUE 0x196 -+#define DPSECI_CMDID_GET_TX_QUEUE 0x197 -+#define DPSECI_CMDID_GET_SEC_ATTR 0x198 -+#define DPSECI_CMDID_GET_SEC_COUNTERS 0x199 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_OPEN(cmd, dpseci_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \ -+ 
MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \ -+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \ -+ MC_RSP_OP(cmd, 5, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, 
attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\ -+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\ -+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\ -+ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\ -+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\ -+ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\ -+ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\ -+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\ -+} while (0) -+ -+#endif /* _FSL_DPSECI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpsw.h b/drivers/net/dpaa2/mc/fsl_dpsw.h -new file mode 100644 -index 0000000..9c1bd9d ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpsw.h -@@ -0,0 +1,2164 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPSW_H -+#define __FSL_DPSW_H -+ -+#include -+ -+/* Data Path L2-Switch API -+ * Contains API for handling DPSW topology and functionality -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * DPSW general definitions -+ */ -+ -+/** -+ * Maximum number of traffic class priorities -+ */ -+#define DPSW_MAX_PRIORITIES 8 -+/** -+ * Maximum number of interfaces -+ */ -+#define DPSW_MAX_IF 64 -+ -+/** -+ * dpsw_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpsw_id: DPSW unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpsw_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpsw_id, -+ uint16_t *token); -+ -+/** -+ * dpsw_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW options -+ */ -+ -+/** -+ * Disable flooding -+ */ -+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL -+/** -+ * Disable Multicast -+ */ -+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL -+/** -+ * Support control interface -+ */ -+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL -+/** -+ * Disable flooding metering -+ */ -+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL -+/** -+ * Enable metering -+ */ -+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL -+ -+/** -+ * enum dpsw_component_type - component type of a bridge -+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an -+ * enterprise VLAN bridge or of a Provider Bridge used -+ * to process C-tagged frames -+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a -+ * Provider Bridge -+ * -+ */ -+enum dpsw_component_type { -+ DPSW_COMPONENT_TYPE_C_VLAN = 0, -+ DPSW_COMPONENT_TYPE_S_VLAN -+}; -+ -+/** -+ * struct dpsw_cfg - DPSW configuration -+ * @num_ifs: Number of external and internal interfaces -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpsw_cfg { -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Enable/Disable DPSW features (bitmap) -+ * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16 -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16 -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @component_type: Indicates the component type of this bridge -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ enum dpsw_component_type component_type; -+ } adv; -+}; -+ -+/** -+ * dpsw_create() - Create the DPSW object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPSW object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpsw_open() function to get an authentication -+ * token first -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpsw_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpsw_destroy() - Destroy the DPSW object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; error code otherwise. 
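A minimal dpsw_create() sketch under the same assumptions as the earlier examples: zeroed adv fields fall back to the defaults listed in the structure comment (16 VLANs, 16 FDBs, 1024 FDB entries, 300 s aging), and disabling flooding via DPSW_OPT_FLOODING_DIS is just an example option choice:

#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int l2sw_create(struct fsl_mc_io *mc_io, uint16_t num_ifs, uint16_t *token)
{
    struct dpsw_cfg cfg;

    memset(&cfg, 0, sizeof(cfg));               /* zeroed adv = documented defaults */
    cfg.num_ifs = num_ifs;                      /* external + internal interfaces */
    cfg.adv.options = DPSW_OPT_FLOODING_DIS;    /* example: no flooding */
    cfg.adv.component_type = DPSW_COMPONENT_TYPE_C_VLAN;

    return dpsw_create(mc_io, 0, &cfg, token);
}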
-+ */ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_enable() - Enable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_disable() - Disable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_is_enabled() - Check if the DPSW is enabled -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpsw_reset() - Reset the DPSW, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW IRQ Index and Events -+ */ -+ -+#define DPSW_IRQ_INDEX_IF 0x0000 -+#define DPSW_IRQ_INDEX_L2SW 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * struct dpsw_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpsw_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_get_irq() - Get IRQ information from the DPSW -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpsw_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpsw_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpsw_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpsw_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpsw_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
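The IRQ calls are meant to be combined: the mask selects which causes count, the enable arms the index as a whole, and status/clear implement the write-1-to-clear handshake. A hedged sketch for the link-change cause on the interface IRQ index (cmd_flags of 0 and the header name are assumptions):

/* Sketch: arm the link-changed cause, then acknowledge it when it fires. */
#include <stdint.h>
#include "fsl_dpsw.h"

int dpsw_irq_example(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;
        int err;

        /* consider only link-change events on the interface IRQ index */
        err = dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
                                DPSW_IRQ_EVENT_LINK_CHANGED);
        if (!err)
                err = dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);
        if (err)
                return err;

        /* later, typically from the interrupt handler: read and clear (W1C) */
        err = dpsw_get_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF, &status);
        if (!err && (status & DPSW_IRQ_EVENT_LINK_CHANGED))
                err = dpsw_clear_irq_status(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
                                            DPSW_IRQ_EVENT_LINK_CHANGED);
        return err;
}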
-+ */ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+/** -+ * struct dpsw_attr - Structure representing DPSW attributes -+ * @id: DPSW object ID -+ * @version: DPSW version -+ * @options: Enable/Disable DPSW features -+ * @max_vlans: Maximum Number of VLANs -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDBs -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @mem_size: DPSW frame storage memory size -+ * @num_ifs: Number of interfaces -+ * @num_vlans: Current number of VLANs -+ * @num_fdbs: Current number of FDBs -+ * @component_type: Component type of this bridge -+ */ -+struct dpsw_attr { -+ int id; -+ /** -+ * struct version - DPSW version -+ * @major: DPSW major version -+ * @minor: DPSW minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+ uint16_t num_vlans; -+ uint8_t num_fdbs; -+ enum dpsw_component_type component_type; -+}; -+ -+/** -+ * dpsw_get_attributes() - Retrieve DPSW attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @attr: Returned DPSW attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr); -+ -+/** -+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Id -+ * -+ * Only one reflection receive interface is allowed per switch -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * enum dpsw_action - Action selection for special/control frames -+ * @DPSW_ACTION_DROP: Drop frame -+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port -+ */ -+enum dpsw_action { -+ DPSW_ACTION_DROP = 0, -+ DPSW_ACTION_REDIRECT = 1 -+}; -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpsw_link_cfg - Structure representing DPSW link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ */ -+struct dpsw_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpsw_if_set_link_cfg() - set the link configuration. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg); -+/** -+ * struct dpsw_link_state - Structure representing DPSW link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ * @up: 0 - covers two cases: down and disconnected, 1 - up -+ */ -+struct dpsw_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpsw_if_get_link_state() - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @state: link state 1 - linkup, 0 - link down or disconnected -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_state *state); -+ -+/** -+ * dpsw_if_set_flooding() - Enable/disable flooding for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration -+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers -+ * to the IEEE 802.1p priority -+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used -+ * separately or in conjunction with PCP to indicate frames -+ * eligible to be dropped in the presence of congestion -+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN -+ * to which the frame belongs.
The hexadecimal values -+ * of 0x000 and 0xFFF are reserved; -+ * all other values may be used as VLAN identifiers, -+ * allowing up to 4,094 VLANs -+ */ -+struct dpsw_tci_cfg { -+ uint8_t pcp; -+ uint8_t dei; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg); -+ -+/** -+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg); -+ -+/** -+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states -+ * @DPSW_STP_STATE_BLOCKING: Blocking state -+ * @DPSW_STP_STATE_LISTENING: Listening state -+ * @DPSW_STP_STATE_LEARNING: Learning state -+ * @DPSW_STP_STATE_FORWARDING: Forwarding state -+ * -+ */ -+enum dpsw_stp_state { -+ DPSW_STP_STATE_BLOCKING = 0, -+ DPSW_STP_STATE_LISTENING = 1, -+ DPSW_STP_STATE_LEARNING = 2, -+ DPSW_STP_STATE_FORWARDING = 3 -+}; -+ -+/** -+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration -+ * @vlan_id: VLAN ID STP state -+ * @state: STP state -+ */ -+struct dpsw_stp_cfg { -+ uint16_t vlan_id; -+ enum dpsw_stp_state state; -+}; -+ -+/** -+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: STP State configuration parameters -+ * -+ * The following STP states are supported - -+ * blocking, listening, learning, forwarding and disabled. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg); -+ -+/** -+ * enum dpsw_accepted_frames - Types of frames to accept -+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority tagged frames -+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * Priority-Tagged frames received on this interface. 
-+ * -+ */ -+enum dpsw_accepted_frames { -+ DPSW_ADMIT_ALL = 1, -+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 -+}; -+ -+/** -+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: When a frame is not accepted, it may be discarded or -+ * redirected to control interface depending on this mode -+ */ -+struct dpsw_accepted_frames_cfg { -+ enum dpsw_accepted_frames type; -+ enum dpsw_action unaccept_act; -+}; -+ -+/** -+ * dpsw_if_set_accepted_frames() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Frame types configuration -+ * -+ * When is admit_only_vlan_tagged- the device will discard untagged -+ * frames or Priority-Tagged frames received on this interface. -+ * When admit_only_untagged- untagged frames or Priority-Tagged -+ * frames received on this interface will be accepted and assigned -+ * to a VID based on the PVID and VID Set for this interface. -+ * When admit_all - the device will accept VLAN tagged, untagged -+ * and priority tagged frames. -+ * The default is admit_all -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg); -+ -+/** -+ * dpsw_if_set_accept_all_vlan() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @accept_all: Accept or drop frames having different VLAN -+ * -+ * When this is accept (FALSE), the device will discard incoming -+ * frames for VLANs that do not include this interface in its -+ * Member set. When accept (TRUE), the interface will accept all incoming frames -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
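As noted above, the default is admit_all; tightening an interface to VLAN-tagged-only traffic is a two-field configuration. The interface ID, the drop action and the usual header/flag values in the sketch below are illustrative assumptions.

/* Sketch: accept only VLAN-tagged frames on one interface, drop the rest. */
#include <stdint.h>
#include "fsl_dpsw.h"

int dpsw_admit_tagged_example(struct fsl_mc_io *mc_io, uint16_t token,
                              uint16_t if_id)
{
        struct dpsw_accepted_frames_cfg cfg = {
                .type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
                .unaccept_act = DPSW_ACTION_DROP,  /* or DPSW_ACTION_REDIRECT */
        };

        return dpsw_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}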
-+ */ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all); -+ -+/** -+ * enum dpsw_counter - Counters types -+ * @DPSW_CNT_ING_FRAME: Counts ingress frames -+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes -+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame -+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPSW_CNT_EGR_FRAME: Counts egress frames -+ * @DPSW_CNT_EGR_BYTE: Counts eEgress bytes -+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames -+ */ -+enum dpsw_counter { -+ DPSW_CNT_ING_FRAME = 0x0, -+ DPSW_CNT_ING_BYTE = 0x1, -+ DPSW_CNT_ING_FLTR_FRAME = 0x2, -+ DPSW_CNT_ING_FRAME_DISCARD = 0x3, -+ DPSW_CNT_ING_MCAST_FRAME = 0x4, -+ DPSW_CNT_ING_MCAST_BYTE = 0x5, -+ DPSW_CNT_ING_BCAST_FRAME = 0x6, -+ DPSW_CNT_ING_BCAST_BYTES = 0x7, -+ DPSW_CNT_EGR_FRAME = 0x8, -+ DPSW_CNT_EGR_BYTE = 0x9, -+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa, -+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb -+}; -+ -+/** -+ * dpsw_if_get_counter() - Get specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: return value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter); -+ -+/** -+ * dpsw_if_set_counter() - Set specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: New counter value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter); -+ -+/** -+ * Maximum number of TC -+ */ -+#define DPSW_MAX_TC 8 -+ -+/** -+ * enum dpsw_priority_selector - User priority -+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which -+ * refers to the IEEE 802.1p priority. 
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit -+ * field from IP header -+ * -+ */ -+enum dpsw_priority_selector { -+ DPSW_UP_PCP = 0, -+ DPSW_UP_DSCP = 1 -+}; -+ -+/** -+ * enum dpsw_schedule_mode - Traffic classes scheduling -+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority -+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm -+ */ -+enum dpsw_schedule_mode { -+ DPSW_SCHED_STRICT_PRIORITY, -+ DPSW_SCHED_WEIGHTED -+}; -+ -+/** -+ * struct dpsw_tx_schedule_cfg - traffic class configuration -+ * @mode: Strict or weight-based scheduling -+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000 -+ */ -+struct dpsw_tx_schedule_cfg { -+ enum dpsw_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic -+ * class configuration -+ * @priority_selector: Source for user priority regeneration -+ * @tc_id: The Regenerated User priority that the incoming -+ * User Priority is mapped to for this interface -+ * @tc_sched: Traffic classes configuration -+ */ -+struct dpsw_tx_selection_cfg { -+ enum dpsw_priority_selector priority_selector; -+ uint8_t tc_id[DPSW_MAX_PRIORITIES]; -+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC]; -+}; -+ -+/** -+ * dpsw_if_set_tx_selection() - Function is used for mapping variety -+ * of frame fields -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Traffic class mapping configuration -+ * -+ * Function is used for mapping variety of frame fields (DSCP, PCP) -+ * to Traffic Class. Traffic class is a number -+ * in the range from 0 to 7 -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpsw_reflection_filter - Filter type for frames to reflect -+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames -+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to -+ * particular VLAN defined by vid parameter -+ * -+ */ -+enum dpsw_reflection_filter { -+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, -+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 -+}; -+ -+/** -+ * struct dpsw_reflection_cfg - Structure representing reflection information -+ * @filter: Filter type for frames to reflect -+ * @vlan_id: Vlan Id to reflect; valid only when filter type is -+ * DPSW_INGRESS_VLAN -+ */ -+struct dpsw_reflection_cfg { -+ enum dpsw_reflection_filter filter; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
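Stepping back to dpsw_if_set_tx_selection() declared above: a common configuration, sketched here under the editor's assumptions (interface ID, identity mapping, strict priority throughout), is a one-to-one PCP-to-traffic-class map.

/* Sketch: map PCP 0..7 straight onto TC 0..7, all strict priority. */
#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int dpsw_tx_selection_example(struct fsl_mc_io *mc_io, uint16_t token,
                              uint16_t if_id)
{
        struct dpsw_tx_selection_cfg cfg;
        int i;

        memset(&cfg, 0, sizeof(cfg));
        cfg.priority_selector = DPSW_UP_PCP;
        for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
                cfg.tc_id[i] = i;                 /* identity PCP -> TC map */
        for (i = 0; i < DPSW_MAX_TC; i++)
                cfg.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;

        return dpsw_if_set_tx_selection(mc_io, 0, token, if_id, &cfg);
}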
-+ */ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * enum dpsw_metering_mode - Metering modes -+ * @DPSW_METERING_MODE_NONE: metering disabled -+ * @DPSW_METERING_MODE_RFC2698: RFC 2698 -+ * @DPSW_METERING_MODE_RFC4115: RFC 4115 -+ */ -+enum dpsw_metering_mode { -+ DPSW_METERING_MODE_NONE = 0, -+ DPSW_METERING_MODE_RFC2698, -+ DPSW_METERING_MODE_RFC4115 -+}; -+ -+/** -+ * enum dpsw_metering_unit - Metering count -+ * @DPSW_METERING_UNIT_BYTES: count bytes -+ * @DPSW_METERING_UNIT_FRAMES: count frames -+ */ -+enum dpsw_metering_unit { -+ DPSW_METERING_UNIT_BYTES = 0, -+ DPSW_METERING_UNIT_FRAMES -+}; -+ -+/** -+ * struct dpsw_metering_cfg - Metering configuration -+ * @mode: metering modes -+ * @units: Bytes or frame units -+ * @cir: Committed information rate (CIR) in Kbits/s -+ * @eir: Peak information rate (PIR) Kbit/s rfc2698 -+ * Excess information rate (EIR) Kbit/s rfc4115 -+ * @cbs: Committed burst size (CBS) in bytes -+ * @ebs: Peak burst size (PBS) in bytes for rfc2698 -+ * Excess bust size (EBS) in bytes rfc4115 -+ * -+ */ -+struct dpsw_metering_cfg { -+ enum dpsw_metering_mode mode; -+ enum dpsw_metering_unit units; -+ uint32_t cir; -+ uint32_t eir; -+ uint32_t cbs; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpsw_if_set_flooding_metering() - Set flooding metering -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * dpsw_if_set_metering() - Set interface metering for flooding -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class ID -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * enum dpsw_early_drop_unit - DPSW early drop unit -+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes -+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames -+ */ -+enum dpsw_early_drop_unit { -+ DPSW_EARLY_DROP_UNIT_BYTE = 0, -+ DPSW_EARLY_DROP_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpsw_early_drop_mode - DPSW early drop mode -+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpsw_early_drop_mode { -+ DPSW_EARLY_DROP_MODE_NONE = 0, -+ DPSW_EARLY_DROP_MODE_TAIL, -+ DPSW_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpsw_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the maximum threshold) -+ */ -+struct dpsw_wred_cfg { -+ uint64_t min_threshold; -+ uint64_t max_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpsw_early_drop_cfg - early-drop configuration -+ * @drop_mode: drop mode -+ * @units: count units -+ * @yellow: WRED - 'yellow' configuration -+ * @green: WRED - 'green' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpsw_early_drop_cfg { -+ enum dpsw_early_drop_mode drop_mode; -+ enum dpsw_early_drop_unit units; -+ struct dpsw_wred_cfg yellow; -+ struct dpsw_wred_cfg green; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpsw_if_tc_set_early_drop -+ * -+ */ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop -+ * configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 64 bytes; -+ * Must be cacheline-aligned and DMA-able memory -+ * -+ * warning: Before calling this function, call dpsw_prepare_if_tc_early_drop() -+ * to prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
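The early-drop path is a two-step prepare/commit sequence: dpsw_prepare_early_drop() serializes the configuration into a zeroed 256-byte buffer, and dpsw_if_set_early_drop() is then given the DMA address of that buffer. How the buffer is allocated and mapped is platform-specific, so the sketch below simply receives both the virtual address and its IOVA; the WRED thresholds and probabilities are illustrative values only.

/* Sketch: WRED early drop on one traffic class (prepare, then commit). */
#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int dpsw_wred_example(struct fsl_mc_io *mc_io, uint16_t token,
                      uint16_t if_id, uint8_t tc_id,
                      uint8_t *dma_buf /* >= 256 B, DMA-able */,
                      uint64_t dma_buf_iova)
{
        struct dpsw_early_drop_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.drop_mode = DPSW_EARLY_DROP_MODE_WRED;
        cfg.units = DPSW_EARLY_DROP_UNIT_BYTE;
        cfg.green.min_threshold = 48 * 1024;      /* illustrative thresholds */
        cfg.green.max_threshold = 64 * 1024;
        cfg.green.drop_probability = 50;
        cfg.yellow.min_threshold = 32 * 1024;
        cfg.yellow.max_threshold = 48 * 1024;
        cfg.yellow.drop_probability = 75;

        memset(dma_buf, 0, 256);                  /* buffer must start zeroed */
        dpsw_prepare_early_drop(&cfg, dma_buf);

        return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id,
                                      dma_buf_iova);
}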
-+ */ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier -+ * @tpid: An additional tag protocol identifier -+ */ -+struct dpsw_custom_tpid_cfg { -+ uint16_t tpid; -+}; -+ -+/** -+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * API Configures a distinct Ethernet type value (or TPID value) -+ * to indicate a VLAN tag in addition to the common -+ * TPID values 0x8100 and 0x88A8. -+ * Two additional TPID's are supported -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_if_enable() - Enable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * dpsw_if_disable() - Disable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * struct dpsw_if_attr - Structure representing DPSW interface attributes -+ * @num_tcs: Number of traffic classes -+ * @rate: Transmit rate in bits per second -+ * @options: Interface configuration options (bitmap) -+ * @enabled: Indicates if interface is enabled -+ * @accept_all_vlan: The device discards/accepts incoming frames -+ * for VLANs that do not include this interface -+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device -+ * discards untagged frames or priority-tagged frames received on -+ * this interface; -+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- -+ * tagged frames received on this interface are accepted -+ * @qdid: control frames transmit qdid -+ */ -+struct dpsw_if_attr { -+ uint8_t num_tcs; -+ uint32_t rate; -+ uint32_t options; -+ int enabled; -+ int accept_all_vlan; -+ enum dpsw_accepted_frames admit_untagged; -+ uint16_t qdid; -+}; -+ -+/** -+ * dpsw_if_get_attributes() - Function obtains attributes of interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @attr: Returned interface attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr); -+ -+/** -+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length); -+ -+/** -+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Returned maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length); -+ -+/** -+ * struct dpsw_vlan_cfg - VLAN Configuration -+ * @fdb_id: Forwarding Data Base -+ */ -+struct dpsw_vlan_cfg { -+ uint16_t fdb_id; -+}; -+ -+/** -+ * dpsw_vlan_add() - Adding new VLAN to DPSW. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: VLAN configuration -+ * -+ * Only VLAN ID and FDB ID are required parameters here. -+ * 12 bit VLAN ID is defined in IEEE802.1Q. -+ * Adding a duplicate VLAN ID is not allowed. -+ * FDB ID can be shared across multiple VLANs. Shared learning -+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs -+ * with same fdb_id -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
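Creating a VLAN therefore comes down to choosing a valid 12-bit VLAN ID and pointing it at an FDB. The FDB ID is taken as a parameter in this sketch (it may come from dpsw_fdb_add(), declared further below, or refer to the switch's default FDB); the VLAN ID 100 is arbitrary.

/* Sketch: add VLAN 100 backed by an existing FDB. */
#include <stdint.h>
#include "fsl_dpsw.h"

int dpsw_vlan_add_example(struct fsl_mc_io *mc_io, uint16_t token,
                          uint16_t fdb_id)
{
        struct dpsw_vlan_cfg cfg = { .fdb_id = fdb_id };

        /* 100 is an arbitrary, valid (non 0x000/0xFFF) VLAN ID */
        return dpsw_vlan_add(mc_io, 0, token, 100, &cfg);
}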
-+ */ -+int dpsw_vlan_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_cfg *cfg); -+ -+/** -+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces -+ * @num_ifs: The number of interfaces that are assigned to the egress -+ * list for this VLAN -+ * @if_id: The set of interfaces that are -+ * assigned to the egress list for this VLAN -+ */ -+struct dpsw_vlan_if_cfg { -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces to add -+ * -+ * It adds only interfaces not belonging to this VLAN yet, -+ * otherwise an error is generated and an entire command is -+ * ignored. This function can be called numerous times always -+ * providing required interfaces delta. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be -+ * transmitted as untagged. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces that should be transmitted as untagged -+ * -+ * These interfaces should already belong to this VLAN. -+ * By default all interfaces are transmitted as tagged. -+ * Providing un-existing interface or untagged interface that is -+ * configured untagged already generates an error and the entire -+ * command is ignored. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be -+ * included in flooding when frame with unknown destination -+ * unicast MAC arrived. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces that should be used for flooding -+ * -+ * These interfaces should belong to this VLAN. By default all -+ * interfaces are included into flooding list. Providing -+ * un-existing interface or an interface that already in the -+ * flooding list generates an error and the entire command is -+ * ignored. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces that should be removed -+ * -+ * Interfaces must belong to this VLAN, otherwise an error -+ * is returned and an the command is ignored -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
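Because dpsw_vlan_add_if() and its *_untagged/*_flooding variants all take the same struct dpsw_vlan_if_cfg delta, membership changes are usually expressed as small incremental calls. A sketch with illustrative interface and VLAN IDs:

/* Sketch: put interfaces 0 and 1 into VLAN 100, transmit untagged on if 1. */
#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int dpsw_vlan_members_example(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpsw_vlan_if_cfg members, untagged;
        int err;

        memset(&members, 0, sizeof(members));
        members.num_ifs = 2;
        members.if_id[0] = 0;
        members.if_id[1] = 1;
        err = dpsw_vlan_add_if(mc_io, 0, token, 100, &members);
        if (err)
                return err;

        memset(&untagged, 0, sizeof(untagged));
        untagged.num_ifs = 1;
        untagged.if_id[0] = 1;              /* must already be in the VLAN */
        return dpsw_vlan_add_if_untagged(mc_io, 0, token, 100, &untagged);
}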
-+ */ -+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be -+ * converted from transmitted as untagged to transmit as tagged. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces that should be removed -+ * -+ * Interfaces provided by API have to belong to this VLAN and -+ * configured untagged, otherwise an error is returned and the -+ * command is ignored -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be -+ * removed from the flooding list. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces used for flooding -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove() - Remove an entire VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * struct dpsw_vlan_attr - VLAN attributes -+ * @fdb_id: Associated FDB ID -+ * @num_ifs: Number of interfaces -+ * @num_untagged_ifs: Number of untagged interfaces -+ * @num_flooding_ifs: Number of flooding interfaces -+ */ -+struct dpsw_vlan_attr { -+ uint16_t fdb_id; -+ uint16_t num_ifs; -+ uint16_t num_untagged_ifs; -+ uint16_t num_flooding_ifs; -+}; -+ -+/** -+ * dpsw_vlan_get_attributes() - Get VLAN attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @attr: Returned DPSW attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_attr *attr); -+ -+/** -+ * dpsw_vlan_get_if() - Get interfaces belong to this VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of interfaces belong to this VLAN -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of flooding interfaces -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as -+ * untagged -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of untagged interfaces -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * struct dpsw_fdb_cfg - FDB Configuration -+ * @num_fdb_entries: Number of FDB entries -+ * @fdb_aging_time: Aging time in seconds -+ */ -+struct dpsw_fdb_cfg { -+ uint16_t num_fdb_entries; -+ uint16_t fdb_aging_time; -+}; -+ -+/** -+ * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for -+ * the reference -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Returned Forwarding Database Identifier -+ * @cfg: FDB Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *fdb_id, -+ const struct dpsw_fdb_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove() - Remove FDB from switch -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id); -+ -+/** -+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic -+ * @DPSW_FDB_ENTRY_STATIC: Static entry -+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry -+ */ -+enum dpsw_fdb_entry_type { -+ DPSW_FDB_ENTRY_STATIC = 0, -+ DPSW_FDB_ENTRY_DINAMIC = 1 -+}; -+ -+/** -+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration -+ * @type: Select static or dynamic entry -+ * @mac_addr: MAC address -+ * @if_egress: Egress interface ID -+ */ -+struct dpsw_fdb_unicast_cfg { -+ enum dpsw_fdb_entry_type type; -+ uint8_t mac_addr[6]; -+ uint16_t if_egress; -+}; -+ -+/** -+ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
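A static forwarding entry therefore takes two steps: obtain an FDB handle with dpsw_fdb_add() and install the address with dpsw_fdb_add_unicast(). The table size, aging time, MAC address and egress interface in this sketch are illustrative choices, not values prescribed by the API.

/* Sketch: create an FDB and pin a static unicast MAC to interface 2. */
#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int dpsw_fdb_unicast_example(struct fsl_mc_io *mc_io, uint16_t token,
                             uint16_t *fdb_id)
{
        static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        struct dpsw_fdb_cfg fdb_cfg = {
                .num_fdb_entries = 1024,
                .fdb_aging_time = 300,       /* seconds */
        };
        struct dpsw_fdb_unicast_cfg uc;
        int err;

        err = dpsw_fdb_add(mc_io, 0, token, fdb_id, &fdb_cfg);
        if (err)
                return err;

        memset(&uc, 0, sizeof(uc));
        uc.type = DPSW_FDB_ENTRY_STATIC;
        uc.if_egress = 2;                    /* illustrative egress interface */
        memcpy(uc.mac_addr, mac, sizeof(mac));

        return dpsw_fdb_add_unicast(mc_io, 0, token, *fdb_id, &uc);
}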
-+ */ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by -+ * unicast Ethernet address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration -+ * @type: Select static or dynamic entry -+ * @mac_addr: MAC address -+ * @num_ifs: Number of external and internal interfaces -+ * @if_id: Egress interface IDs -+ */ -+struct dpsw_fdb_multicast_cfg { -+ enum dpsw_fdb_entry_type type; -+ uint8_t mac_addr[6]; -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Multicast entry configuration -+ * -+ * If group doesn't exist, it will be created. -+ * It adds only interfaces not belonging to this multicast group -+ * yet, otherwise error will be generated and the command is -+ * ignored. -+ * This function may be called numerous times always providing -+ * required interfaces delta. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet -+ * address. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned multicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast -+ * group. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Multicast entry configuration -+ * -+ * Interfaces provided by this API have to exist in the group, -+ * otherwise an error will be returned and an entire command -+ * ignored. If there is no interface left in the group, -+ * an entire group is deleted -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * enum dpsw_fdb_learning_mode - Auto-learning modes -+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning -+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning -+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU -+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU -+ * -+ * NONE - SECURE LEARNING -+ * SMAC found DMAC found CTLU Action -+ * v v Forward frame to -+ * 1. DMAC destination -+ * - v Forward frame to -+ * 1. DMAC destination -+ * 2. Control interface -+ * v - Forward frame to -+ * 1. Flooding list of interfaces -+ * - - Forward frame to -+ * 1. Flooding list of interfaces -+ * 2. Control interface -+ * SECURE LEARING -+ * SMAC found DMAC found CTLU Action -+ * v v Forward frame to -+ * 1. DMAC destination -+ * - v Forward frame to -+ * 1. Control interface -+ * v - Forward frame to -+ * 1. Flooding list of interfaces -+ * - - Forward frame to -+ * 1. Control interface -+ */ -+enum dpsw_fdb_learning_mode { -+ DPSW_FDB_LEARNING_MODE_DIS = 0, -+ DPSW_FDB_LEARNING_MODE_HW = 1, -+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, -+ DPSW_FDB_LEARNING_MODE_SECURE = 3 -+}; -+ -+/** -+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @mode: learning mode -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ enum dpsw_fdb_learning_mode mode); -+ -+/** -+ * struct dpsw_fdb_attr - FDB Attributes -+ * @max_fdb_entries: Number of FDB entries -+ * @fdb_aging_time: Aging time in seconds -+ * @learning_mode: Learning mode -+ * @num_fdb_mc_groups: Current number of multicast groups -+ * @max_fdb_mc_groups: Maximum number of multicast groups -+ */ -+struct dpsw_fdb_attr { -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ enum dpsw_fdb_learning_mode learning_mode; -+ uint16_t num_fdb_mc_groups; -+ uint16_t max_fdb_mc_groups; -+}; -+ -+/** -+ * dpsw_fdb_get_attributes() - Get FDB attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @attr: Returned FDB attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
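Switching between the learning modes described above and confirming the result is one call each way. A sketch, assuming the FDB ID came from an earlier dpsw_fdb_add() call and the usual placeholder cmd_flags:

/* Sketch: enable secure CPU learning on an FDB and read the attributes back. */
#include <stdint.h>
#include "fsl_dpsw.h"

int dpsw_learning_example(struct fsl_mc_io *mc_io, uint16_t token,
                          uint16_t fdb_id)
{
        struct dpsw_fdb_attr attr;
        int err;

        err = dpsw_fdb_set_learning_mode(mc_io, 0, token, fdb_id,
                                         DPSW_FDB_LEARNING_MODE_SECURE);
        if (err)
                return err;

        err = dpsw_fdb_get_attributes(mc_io, 0, token, fdb_id, &attr);
        if (!err && attr.learning_mode != DPSW_FDB_LEARNING_MODE_SECURE)
                err = -1;    /* unexpected read-back; illustrative check only */
        return err;
}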
-+ */ -+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_attr *attr); -+ -+/** -+ * struct dpsw_acl_cfg - ACL Configuration -+ * @max_entries: Number of FDB entries -+ */ -+struct dpsw_acl_cfg { -+ uint16_t max_entries; -+}; -+ -+/** -+ * struct dpsw_acl_fields - ACL fields. -+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast, -+ * slow protocols, MVRP, STP -+ * @l2_source_mac: Source MAC address -+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following -+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae, -+ * Q-in-Q, IPv4, IPv6, PPPoE -+ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload -+ * @l2_vlan_id: layer 2 VLAN ID -+ * @l2_ether_type: layer 2 Ethernet type -+ * @l3_dscp: Layer 3 differentiated services code point -+ * @l3_protocol: Tells the Network layer at the destination host, to which -+ * Protocol this packet belongs to. The following protocol are -+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6 -+ * (encapsulation), GRE, PTP -+ * @l3_source_ip: Source IPv4 IP -+ * @l3_dest_ip: Destination IPv4 IP -+ * @l4_source_port: Source TCP/UDP Port -+ * @l4_dest_port: Destination TCP/UDP Port -+ */ -+struct dpsw_acl_fields { -+ uint8_t l2_dest_mac[6]; -+ uint8_t l2_source_mac[6]; -+ uint16_t l2_tpid; -+ uint8_t l2_pcp_dei; -+ uint16_t l2_vlan_id; -+ uint16_t l2_ether_type; -+ uint8_t l3_dscp; -+ uint8_t l3_protocol; -+ uint32_t l3_source_ip; -+ uint32_t l3_dest_ip; -+ uint16_t l4_source_port; -+ uint16_t l4_dest_port; -+}; -+ -+/** -+ * struct dpsw_acl_key - ACL key -+ * @match: Match fields -+ * @mask: Mask: b'1 - valid, b'0 don't care -+ */ -+struct dpsw_acl_key { -+ struct dpsw_acl_fields match; -+ struct dpsw_acl_fields mask; -+}; -+ -+/** -+ * enum dpsw_acl_action -+ * @DPSW_ACL_ACTION_DROP: Drop frame -+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port -+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame -+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface -+ */ -+enum dpsw_acl_action { -+ DPSW_ACL_ACTION_DROP, -+ DPSW_ACL_ACTION_REDIRECT, -+ DPSW_ACL_ACTION_ACCEPT, -+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF -+}; -+ -+/** -+ * struct dpsw_acl_result - ACL action -+ * @action: Action should be taken when ACL entry hit -+ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for -+ * action -+ */ -+struct dpsw_acl_result { -+ enum dpsw_acl_action action; -+ uint16_t if_id; -+}; -+ -+/** -+ * struct dpsw_acl_entry_cfg - ACL entry -+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call -+ * to dpsw_acl_prepare_entry_cfg() -+ * @result: Required action when entry hit occurs -+ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change -+ * during the lifetime of a Policy. It is user responsibility to -+ * space the priorities according to consequent rule additions. -+ */ -+struct dpsw_acl_entry_cfg { -+ uint64_t key_iova; -+ struct dpsw_acl_result result; -+ int precedence; -+}; -+ -+/** -+ * dpsw_acl_add() - Adds ACL to L2 switch. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: Returned ACL ID, for the future reference -+ * @cfg: ACL configuration -+ * -+ * Create Access Control List. Multiple ACLs can be created and -+ * co-exist in L2 switch -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_acl_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *acl_id, -+ const struct dpsw_acl_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove() - Removes ACL from L2 switch. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id); -+ -+/** -+ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL. -+ * @key: key -+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before adding or removing acl_entry -+ * -+ */ -+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, -+ uint8_t *entry_cfg_buf); -+ -+/** -+ * dpsw_acl_add_entry() - Adds an entry to ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: entry configuration -+ * -+ * warning: This function has to be called after dpsw_acl_set_entry_cfg() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove_entry() - Removes an entry from ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: entry configuration -+ * -+ * warning: This function has to be called after dpsw_acl_set_entry_cfg() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg); -+ -+/** -+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL -+ * @num_ifs: Number of interfaces -+ * @if_id: List of interfaces -+ */ -+struct dpsw_acl_if_cfg { -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: interfaces list -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: interfaces list -+ * -+ * Return: '0' on Success; Error code otherwise. 
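The ACL flow mirrors the early-drop one: build a key, serialize it with dpsw_acl_prepare_entry_cfg() into a DMA-able buffer, then reference that buffer's IOVA from the entry. Buffer allocation and mapping are platform-specific and passed in here; the matched MAC, the redirect target and the table size are illustrative.

/* Sketch: ACL that redirects one destination MAC to interface 3. */
#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"

int dpsw_acl_example(struct fsl_mc_io *mc_io, uint16_t token,
                     uint8_t *key_buf /* >= 256 B, DMA-able */,
                     uint64_t key_buf_iova)
{
        static const uint8_t dmac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
        struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
        struct dpsw_acl_key key;
        struct dpsw_acl_entry_cfg entry;
        struct dpsw_acl_if_cfg ifs;
        uint16_t acl_id;
        int err;

        err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
        if (err)
                return err;

        memset(&key, 0, sizeof(key));
        memcpy(key.match.l2_dest_mac, dmac, sizeof(dmac));
        memset(key.mask.l2_dest_mac, 0xff, 6);    /* exact match on DMAC only */

        memset(key_buf, 0, 256);
        dpsw_acl_prepare_entry_cfg(&key, key_buf);

        memset(&entry, 0, sizeof(entry));
        entry.key_iova = key_buf_iova;
        entry.result.action = DPSW_ACL_ACTION_REDIRECT;
        entry.result.if_id = 3;                   /* illustrative target */
        entry.precedence = 0;
        err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
        if (err)
                return err;

        memset(&ifs, 0, sizeof(ifs));
        ifs.num_ifs = 1;
        ifs.if_id[0] = 0;                         /* bind ACL to interface 0 */
        return dpsw_acl_add_if(mc_io, 0, token, acl_id, &ifs);
}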
-+ */ -+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg); -+ -+/** -+ * struct dpsw_acl_attr - ACL Attributes -+ * @max_entries: Max number of ACL entries -+ * @num_entries: Number of used ACL entries -+ * @num_ifs: Number of interfaces associated with ACL -+ */ -+struct dpsw_acl_attr { -+ uint16_t max_entries; -+ uint16_t num_entries; -+ uint16_t num_ifs; -+}; -+ -+/** -+* dpsw_acl_get_attributes() - Get specific counter of particular interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @acl_id: ACL Identifier -+* @attr: Returned ACL attributes -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ struct dpsw_acl_attr *attr); -+/** -+* struct dpsw_ctrl_if_attr - Control interface attributes -+* @rx_fqid: Receive FQID -+* @rx_err_fqid: Receive error FQID -+* @tx_err_conf_fqid: Transmit error and confirmation FQID -+*/ -+struct dpsw_ctrl_if_attr { -+ uint32_t rx_fqid; -+ uint32_t rx_err_fqid; -+ uint32_t tx_err_conf_fqid; -+}; -+ -+/** -+* dpsw_ctrl_if_get_attributes() - Obtain control interface attributes -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @attr: Returned control interface attributes -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_ctrl_if_attr *attr); -+ -+/** -+ * Maximum number of DPBP -+ */ -+#define DPSW_MAX_DPBP 8 -+ -+/** -+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpsw_ctrl_if_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPSW_MAX_DPBP]; -+}; -+ -+/** -+* dpsw_ctrl_if_set_pools() - Set control interface buffer pools -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @cfg: buffer pools configuration -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_ctrl_if_pools_cfg *cfg); -+ -+/** -+* dpsw_ctrl_if_enable() - Enable control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+* dpsw_ctrl_if_disable() - Function disables control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. 
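The ACL calls declared above are meant to be used as a sequence: create the ACL, attach it to one or more interfaces, serialize the key with dpsw_acl_prepare_entry_cfg() into a zeroed 256-byte DMA-able buffer, and only then add the entry. A minimal sketch of that sequence follows, assuming a platform-specific DMA allocator supplies the buffer and its I/O virtual address (dma_buf / dma_buf_iova are placeholders, and the port-80 match is purely hypothetical; this is not code from the patch):

#include <stdint.h>
#include <string.h>
#include "fsl_mc_sys.h"   /* CMD_PRI_LOW, struct fsl_mc_io (quoted later in this patch) */
#include "fsl_dpsw.h"     /* ACL declarations quoted above */

static int example_acl_drop_port80(struct fsl_mc_io *mc_io, uint16_t token,
                                   uint16_t if_id, uint8_t *dma_buf,
                                   uint64_t dma_buf_iova)
{
        struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
        struct dpsw_acl_if_cfg if_cfg = { .num_ifs = 1, .if_id = { if_id } };
        struct dpsw_acl_key key;
        struct dpsw_acl_entry_cfg entry_cfg;
        uint16_t acl_id;
        int err;

        /* 1. create the ACL and bind it to the interface */
        err = dpsw_acl_add(mc_io, CMD_PRI_LOW, token, &acl_id, &acl_cfg);
        if (err)
                return err;
        err = dpsw_acl_add_if(mc_io, CMD_PRI_LOW, token, acl_id, &if_cfg);
        if (err)
                return err;

        /* 2. describe the match: any frame with L4 destination port 80 */
        memset(&key, 0, sizeof(key));
        key.match.l4_dest_port = 80;
        key.mask.l4_dest_port = 0xffff;  /* only this field is compared */

        /* 3. serialize the key into a zeroed 256-byte DMA-able buffer */
        memset(dma_buf, 0, 256);
        dpsw_acl_prepare_entry_cfg(&key, dma_buf);

        /* 4. add the entry, handing the MC the buffer's I/O virtual address */
        memset(&entry_cfg, 0, sizeof(entry_cfg));
        entry_cfg.key_iova = dma_buf_iova;
        entry_cfg.precedence = 0;
        entry_cfg.result.action = DPSW_ACL_ACTION_DROP;
        return dpsw_acl_add_entry(mc_io, CMD_PRI_LOW, token, acl_id, &entry_cfg);
}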
-+*/ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+#endif /* __FSL_DPSW_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h -new file mode 100644 -index 0000000..c65fe38 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h -@@ -0,0 +1,916 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPSW_CMD_H -+#define __FSL_DPSW_CMD_H -+ -+/* DPSW Version */ -+#define DPSW_VER_MAJOR 7 -+#define DPSW_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPSW_CMDID_CLOSE 0x800 -+#define DPSW_CMDID_OPEN 0x802 -+#define DPSW_CMDID_CREATE 0x902 -+#define DPSW_CMDID_DESTROY 0x900 -+ -+#define DPSW_CMDID_ENABLE 0x002 -+#define DPSW_CMDID_DISABLE 0x003 -+#define DPSW_CMDID_GET_ATTR 0x004 -+#define DPSW_CMDID_RESET 0x005 -+#define DPSW_CMDID_IS_ENABLED 0x006 -+ -+#define DPSW_CMDID_SET_IRQ 0x010 -+#define DPSW_CMDID_GET_IRQ 0x011 -+#define DPSW_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPSW_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPSW_CMDID_SET_IRQ_MASK 0x014 -+#define DPSW_CMDID_GET_IRQ_MASK 0x015 -+#define DPSW_CMDID_GET_IRQ_STATUS 0x016 -+#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPSW_CMDID_SET_REFLECTION_IF 0x022 -+ -+#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024 -+ -+#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026 -+ -+#define DPSW_CMDID_IF_SET_TCI 0x030 -+#define DPSW_CMDID_IF_SET_STP 0x031 -+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032 -+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033 -+#define DPSW_CMDID_IF_GET_COUNTER 0x034 -+#define DPSW_CMDID_IF_SET_COUNTER 0x035 -+#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036 -+#define DPSW_CMDID_IF_ADD_REFLECTION 0x037 -+#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038 -+#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039 -+#define DPSW_CMDID_IF_SET_METERING 0x03A -+#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B -+ -+#define DPSW_CMDID_IF_ENABLE 0x03D -+#define DPSW_CMDID_IF_DISABLE 0x03E -+ -+#define DPSW_CMDID_IF_GET_ATTR 0x042 -+ -+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044 -+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045 -+#define DPSW_CMDID_IF_GET_LINK_STATE 0x046 -+#define DPSW_CMDID_IF_SET_FLOODING 0x047 -+#define DPSW_CMDID_IF_SET_BROADCAST 0x048 -+#define DPSW_CMDID_IF_SET_MULTICAST 0x049 -+#define DPSW_CMDID_IF_GET_TCI 0x04A -+ -+#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C -+ -+#define DPSW_CMDID_VLAN_ADD 0x060 -+#define DPSW_CMDID_VLAN_ADD_IF 0x061 -+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062 -+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063 -+#define DPSW_CMDID_VLAN_REMOVE_IF 0x064 -+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065 -+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066 -+#define DPSW_CMDID_VLAN_REMOVE 0x067 -+#define DPSW_CMDID_VLAN_GET_IF 0x068 -+#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069 -+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A -+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B -+ -+#define DPSW_CMDID_FDB_GET_MULTICAST 0x080 -+#define DPSW_CMDID_FDB_GET_UNICAST 0x081 -+#define DPSW_CMDID_FDB_ADD 0x082 -+#define DPSW_CMDID_FDB_REMOVE 0x083 -+#define DPSW_CMDID_FDB_ADD_UNICAST 0x084 -+#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085 -+#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086 -+#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087 -+#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088 -+#define DPSW_CMDID_FDB_GET_ATTR 0x089 -+ -+#define DPSW_CMDID_ACL_ADD 0x090 -+#define DPSW_CMDID_ACL_REMOVE 0x091 -+#define DPSW_CMDID_ACL_ADD_ENTRY 0x092 -+#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093 -+#define DPSW_CMDID_ACL_ADD_IF 0x094 -+#define DPSW_CMDID_ACL_REMOVE_IF 0x095 -+#define DPSW_CMDID_ACL_GET_ATTR 0x096 -+ -+#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0 -+#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1 -+#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2 -+#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_OPEN(cmd, dpsw_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id) -+ -+/* cmd, param, 
offset, width, type, arg_name */ -+#define DPSW_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \ -+ cfg->adv.component_type);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\ -+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\ -+ 
MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \ -+ attr->component_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\ -+ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, 
counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \ -+ cfg->priority_selector);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[0].mode);\ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[1].mode);\ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[2].mode);\ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[3].mode);\ -+ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[4].mode);\ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[5].mode);\ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[6].mode);\ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[7].mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, 
if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ENABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_DISABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \ -+ attr->admit_untagged);\ -+ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, 
if_id, frame_length) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, 
uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) 
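All of the DPSW_CMD_*/DPSW_RSP_* helpers in this header are thin wrappers around MC_CMD_OP()/MC_RSP_OP(), which OR a value into, or extract it from, one of the seven 64-bit command parameters at a fixed bit offset and width (mc_enc()/mc_dec() in fsl_mc_cmd.h further down in this patch). A self-contained sketch of that packing arithmetic, using a hypothetical fdb_id value and local copies of the two helpers:

/* Standalone illustration (not part of the patch): how a macro such as
 * DPSW_CMD_FDB_REMOVE packs a 16-bit fdb_id into bits 0..15 of params[0].
 * mc_enc()/mc_dec() below mirror the definitions in fsl_mc_cmd.h.
 */
#include <stdint.h>
#include <stdio.h>

#define MAKE_UMASK64(_width) \
        ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : (uint64_t)-1))

static uint64_t mc_enc(int lsoffset, int width, uint64_t val)
{
        return (val & MAKE_UMASK64(width)) << lsoffset;
}

static uint64_t mc_dec(uint64_t val, int lsoffset, int width)
{
        return (val >> lsoffset) & MAKE_UMASK64(width);
}

int main(void)
{
        uint64_t params0 = 0;
        uint16_t fdb_id = 0x0123;        /* hypothetical FDB id */

        /* equivalent of MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) */
        params0 |= mc_enc(0, 16, fdb_id);

        /* equivalent of MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id) */
        printf("fdb_id round-trips as 0x%04x\n",
               (uint16_t)mc_dec(params0, 0, 16));
        return 0;
}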
-+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \ -+ attr->learning_mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_ADD(cmd, acl_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ 
MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_EXT_OP(ext, 3, 48, 
8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\ 
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+#endif /* __FSL_DPSW_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_mc_cmd.h b/drivers/net/dpaa2/mc/fsl_mc_cmd.h -new file mode 100644 -index 0000000..ac4f2b4 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_mc_cmd.h -@@ -0,0 +1,221 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_MC_CMD_H -+#define __FSL_MC_CMD_H -+ -+#define MC_CMD_NUM_OF_PARAMS 7 -+ -+#define MAKE_UMASK64(_width) \ -+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 :\ -+ (uint64_t)-1)) -+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) -+{ -+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); -+} -+ -+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) -+{ -+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); -+} -+ -+struct mc_command { -+ uint64_t header; -+ uint64_t params[MC_CMD_NUM_OF_PARAMS]; -+}; -+ -+/** -+ * enum mc_cmd_status - indicates MC status at command response -+ * @MC_CMD_STATUS_OK: Completed successfully -+ * @MC_CMD_STATUS_READY: Ready to be processed -+ * @MC_CMD_STATUS_AUTH_ERR: Authentication error -+ * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege -+ * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error -+ * @MC_CMD_STATUS_CONFIG_ERR: Configuration error -+ * @MC_CMD_STATUS_TIMEOUT: Operation timed out -+ * @MC_CMD_STATUS_NO_RESOURCE: No resources -+ * @MC_CMD_STATUS_NO_MEMORY: No memory available -+ * @MC_CMD_STATUS_BUSY: Device is busy -+ * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation -+ * @MC_CMD_STATUS_INVALID_STATE: Invalid state -+ */ -+enum mc_cmd_status { -+ MC_CMD_STATUS_OK = 0x0, -+ MC_CMD_STATUS_READY = 0x1, -+ MC_CMD_STATUS_AUTH_ERR = 0x3, -+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, -+ MC_CMD_STATUS_DMA_ERR = 0x5, -+ MC_CMD_STATUS_CONFIG_ERR = 0x6, -+ MC_CMD_STATUS_TIMEOUT = 0x7, -+ MC_CMD_STATUS_NO_RESOURCE = 0x8, -+ MC_CMD_STATUS_NO_MEMORY = 0x9, -+ MC_CMD_STATUS_BUSY = 0xA, -+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, -+ MC_CMD_STATUS_INVALID_STATE = 0xC -+}; -+ -+/* MC command flags */ -+ -+/** -+ * High priority flag -+ */ -+#define MC_CMD_FLAG_PRI 0x00008000 -+/** -+ * Command completion flag -+ */ -+#define MC_CMD_FLAG_INTR_DIS 0x01000000 -+ -+/** -+ * Command ID field offset -+ */ -+#define MC_CMD_HDR_CMDID_O 52 -+/** -+ * Command ID field size -+ */ -+#define MC_CMD_HDR_CMDID_S 12 -+/** -+ * Token field offset -+ */ -+#define MC_CMD_HDR_TOKEN_O 38 -+/** -+ * Token field size -+ */ -+#define MC_CMD_HDR_TOKEN_S 10 -+/** -+ * Status field offset -+ */ -+#define MC_CMD_HDR_STATUS_O 16 -+/** -+ * Status field size -+ */ -+#define MC_CMD_HDR_STATUS_S 8 -+/** -+ * Flags field offset -+ */ -+#define MC_CMD_HDR_FLAGS_O 0 -+/** -+ * Flags field size -+ */ -+#define MC_CMD_HDR_FLAGS_S 32 -+/** -+ * Command flags mask -+ */ -+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 -+ -+#define MC_CMD_HDR_READ_STATUS(_hdr) \ -+ ((enum mc_cmd_status)mc_dec((_hdr), \ -+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) -+ -+#define MC_CMD_HDR_READ_TOKEN(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) -+ -+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) -+ -+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ (_arg = 
(_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) -+ -+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) -+ -+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) -+ -+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ uint64_t hdr; -+ -+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id); -+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S, -+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK)); -+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token); -+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S, -+ MC_CMD_STATUS_READY); -+ -+ return hdr; -+} -+ -+/** -+ * mc_write_command - writes a command to a Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @cmd: pointer to a filled command -+ */ -+static inline void mc_write_command(struct mc_command __iomem *portal, -+ struct mc_command *cmd) -+{ -+ int i; -+ uint32_t word; -+ -+ /* copy command parameters into the portal */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ iowrite64(cmd->params[i], &portal->params[i]); -+ -+ /* submit the command by writing the header */ -+ word = (uint32_t)mc_dec(cmd->header, 32, 32); -+ iowrite32(word, (((uint32_t *)&portal->header) + 1)); -+ -+ word = (uint32_t)mc_dec(cmd->header, 0, 32); -+ iowrite32(word, (uint32_t *)&portal->header); -+} -+ -+/** -+ * mc_read_response - reads the response for the last MC command from a -+ * Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @resp: pointer to command response buffer -+ * -+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. -+ */ -+static inline enum mc_cmd_status mc_read_response( -+ struct mc_command __iomem *portal, -+ struct mc_command *resp) -+{ -+ int i; -+ enum mc_cmd_status status; -+ -+ /* Copy command response header from MC portal: */ -+ resp->header = ioread64(&portal->header); -+ status = MC_CMD_HDR_READ_STATUS(resp->header); -+ if (status != MC_CMD_STATUS_OK) -+ return status; -+ -+ /* Copy command response data from MC portal: */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ resp->params[i] = ioread64(&portal->params[i]); -+ -+ return status; -+} -+ -+#endif /* __FSL_MC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_mc_sys.h b/drivers/net/dpaa2/mc/fsl_mc_sys.h -new file mode 100644 -index 0000000..769c129 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_mc_sys.h -@@ -0,0 +1,95 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_MC_SYS_H -+#define _FSL_MC_SYS_H -+ -+#ifdef __linux_driver__ -+ -+#include -+#include -+#include -+ -+struct fsl_mc_io { -+ void *regs; -+}; -+ -+#ifndef ENOTSUP -+#define ENOTSUP 95 -+#endif -+ -+#define ioread64(_p) readq(_p) -+#define iowrite64(_v, _p) writeq(_v, _p) -+ -+#else /* __linux_driver__ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define cpu_to_le64(x) __cpu_to_le64(x) -+#ifndef dmb -+#define dmb() __asm__ __volatile__ ("" : : : "memory") -+#endif -+#define __iormb() dmb() -+#define __iowmb() dmb() -+#define __arch_getq(a) (*(volatile unsigned long *)(a)) -+#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v)) -+#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v)) -+#define readq(c) ({ uint64_t __v = __arch_getq(c); __iormb(); __v; }) -+#define writeq(v, c) ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; }) -+#define writeq32(v, c) ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; }) -+#define ioread64(_p) readq(_p) -+#define iowrite64(_v, _p) writeq(_v, _p) -+#define iowrite32(_v, _p) writeq32(_v, _p) -+#define __iomem -+ -+struct fsl_mc_io { -+ void *regs; -+}; -+ -+#ifndef ENOTSUP -+#define ENOTSUP 95 -+#endif -+ -+/*GPP is supposed to use MC commands with low priority*/ -+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */ -+ -+struct mc_command; -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); -+ -+#endif /* __linux_driver__ */ -+ -+#endif /* _FSL_MC_SYS_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h -new file mode 100644 -index 0000000..43825b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_net.h -@@ -0,0 +1,480 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
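Taken together, fsl_mc_cmd.h and fsl_mc_sys.h above provide the pieces for a complete exchange with the Management Complex: mc_encode_cmd_header() seeds the 64-bit header (command id in bits 52-63, token in bits 38-47, status preset to MC_CMD_STATUS_READY), a DPSW_CMD_* macro fills the parameter words, and mc_send_command() — declared in fsl_mc_sys.h and implemented elsewhere in the driver — writes the portal and polls for the response. A sketch of how an open-style call is expected to drive these helpers; the function name and error handling are illustrative, not taken from this patch:

#include <stdint.h>
#include "fsl_mc_sys.h"
#include "fsl_mc_cmd.h"
#include "fsl_dpsw_cmd.h"   /* headers quoted above */

static int example_dpsw_open(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
                             int dpsw_id, uint16_t *token)
{
        struct mc_command cmd = { 0 };
        int err;

        /* header: DPSW_CMDID_OPEN, caller-supplied flags, no token yet */
        cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN, cmd_flags, 0);

        /* parameters: the object id, packed by the macro defined above */
        DPSW_CMD_OPEN(cmd, dpsw_id);

        /* write the portal and poll until the MC reports a status */
        err = mc_send_command(mc_io, &cmd);
        if (err)
                return err;

        /* the MC returns the object token in the response header */
        *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
        return 0;
}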
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_NET_H -+#define __FSL_NET_H -+ -+#define LAST_HDR_INDEX 0xFFFFFFFF -+ -+/*****************************************************************************/ -+/* Protocol fields */ -+/*****************************************************************************/ -+ -+/************************* Ethernet fields *********************************/ -+#define NH_FLD_ETH_DA (1) -+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) -+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) -+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) -+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) -+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) -+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) -+ -+#define NH_FLD_ETH_ADDR_SIZE 6 -+ -+/*************************** VLAN fields ***********************************/ -+#define NH_FLD_VLAN_VPRI (1) -+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) -+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) -+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) -+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) -+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) -+ -+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ -+ NH_FLD_VLAN_CFI | \ -+ NH_FLD_VLAN_VID) -+ -+/************************ IP (generic) fields ******************************/ -+#define NH_FLD_IP_VER (1) -+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) -+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) -+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) -+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) -+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) -+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) -+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) -+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) -+ -+#define NH_FLD_IP_PROTO_SIZE 1 -+ -+/***************************** IPV4 fields *********************************/ -+#define NH_FLD_IPV4_VER (1) -+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) -+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) -+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) -+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) -+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) -+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) -+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) -+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) -+#define 
NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) -+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) -+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) -+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) -+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) -+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) -+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) -+ -+#define NH_FLD_IPV4_ADDR_SIZE 4 -+#define NH_FLD_IPV4_PROTO_SIZE 1 -+ -+/***************************** IPV6 fields *********************************/ -+#define NH_FLD_IPV6_VER (1) -+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) -+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) -+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) -+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) -+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) -+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) -+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) -+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) -+ -+#define NH_FLD_IPV6_ADDR_SIZE 16 -+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 -+ -+/***************************** ICMP fields *********************************/ -+#define NH_FLD_ICMP_TYPE (1) -+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) -+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) -+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) -+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) -+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) -+ -+#define NH_FLD_ICMP_CODE_SIZE 1 -+#define NH_FLD_ICMP_TYPE_SIZE 1 -+ -+/***************************** IGMP fields *********************************/ -+#define NH_FLD_IGMP_VERSION (1) -+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) -+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) -+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) -+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) -+ -+/***************************** TCP fields **********************************/ -+#define NH_FLD_TCP_PORT_SRC (1) -+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) -+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) -+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) -+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) -+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) -+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) -+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) -+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) -+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) -+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) -+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) -+ -+#define NH_FLD_TCP_PORT_SIZE 2 -+ -+/***************************** UDP fields **********************************/ -+#define NH_FLD_UDP_PORT_SRC (1) -+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) -+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) -+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) -+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_UDP_PORT_SIZE 2 -+ -+/*************************** UDP-lite fields *******************************/ -+#define NH_FLD_UDP_LITE_PORT_SRC (1) -+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) -+#define NH_FLD_UDP_LITE_ALL_FIELDS \ -+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_UDP_LITE_PORT_SIZE 2 -+ -+/*************************** UDP-encap-ESP fields **************************/ -+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) -+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) -+#define NH_FLD_UDP_ENC_ESP_LEN 
(NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) -+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) -+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) -+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) -+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ -+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) -+ -+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 -+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_PORT_SRC (1) -+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) -+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) -+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) -+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_SCTP_PORT_SIZE 2 -+ -+/***************************** DCCP fields *********************************/ -+#define NH_FLD_DCCP_PORT_SRC (1) -+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) -+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_DCCP_PORT_SIZE 2 -+ -+/***************************** IPHC fields *********************************/ -+#define NH_FLD_IPHC_CID (1) -+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) -+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) -+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) -+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) -+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) -+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) -+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) -+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) -+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) -+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) -+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) -+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) -+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ -+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) -+ -+/*************************** L2TPV2 fields *********************************/ -+#define NH_FLD_L2TPV2_TYPE_BIT (1) -+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) -+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) -+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) -+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) -+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) -+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) -+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) -+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) -+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) -+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) -+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) -+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) -+#define NH_FLD_L2TPV2_ALL_FIELDS \ -+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) -+ -+/*************************** L2TPV3 fields *********************************/ -+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) -+#define 
NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) -+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) -+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) -+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) -+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) -+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) -+ -+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) -+ -+/**************************** PPP fields ***********************************/ -+#define NH_FLD_PPP_PID (1) -+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) -+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) -+ -+/************************** PPPoE fields ***********************************/ -+#define NH_FLD_PPPOE_VER (1) -+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) -+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) -+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) -+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) -+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) -+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) -+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) -+ -+/************************* PPP-Mux fields **********************************/ -+#define NH_FLD_PPPMUX_PID (1) -+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) -+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) -+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) -+ -+/*********************** PPP-Mux sub-frame fields **************************/ -+#define NH_FLD_PPPMUX_SUBFRM_PFF (1) -+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) -+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) -+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) -+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) -+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ -+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) -+ -+/*************************** LLC fields ************************************/ -+#define NH_FLD_LLC_DSAP (1) -+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) -+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) -+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) -+ -+/*************************** NLPID fields **********************************/ -+#define NH_FLD_NLPID_NLPID (1) -+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) -+ -+/*************************** SNAP fields ***********************************/ -+#define NH_FLD_SNAP_OUI (1) -+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) -+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) -+ -+/*************************** LLC SNAP fields *******************************/ -+#define NH_FLD_LLC_SNAP_TYPE (1) -+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) -+ -+#define NH_FLD_ARP_HTYPE (1) -+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) -+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) -+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) -+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) -+#define NH_FLD_ARP_SHA 
(NH_FLD_ARP_HTYPE << 5) -+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) -+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) -+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) -+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) -+ -+/*************************** RFC2684 fields ********************************/ -+#define NH_FLD_RFC2684_LLC (1) -+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) -+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) -+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) -+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) -+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) -+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) -+ -+/*************************** User defined fields ***************************/ -+#define NH_FLD_USER_DEFINED_SRCPORT (1) -+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) -+#define NH_FLD_USER_DEFINED_ALL_FIELDS \ -+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) -+ -+/*************************** Payload fields ********************************/ -+#define NH_FLD_PAYLOAD_BUFFER (1) -+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) -+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) -+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) -+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) -+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) -+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) -+ -+/*************************** GRE fields ************************************/ -+#define NH_FLD_GRE_TYPE (1) -+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) -+ -+/*************************** MINENCAP fields *******************************/ -+#define NH_FLD_MINENCAP_SRC_IP (1) -+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) -+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) -+#define NH_FLD_MINENCAP_ALL_FIELDS \ -+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) -+ -+/*************************** IPSEC AH fields *******************************/ -+#define NH_FLD_IPSEC_AH_SPI (1) -+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) -+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) -+ -+/*************************** IPSEC ESP fields ******************************/ -+#define NH_FLD_IPSEC_ESP_SPI (1) -+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) -+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) -+ -+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 -+ -+/*************************** MPLS fields ***********************************/ -+#define NH_FLD_MPLS_LABEL_STACK (1) -+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ -+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) -+ -+/*************************** MACSEC fields *********************************/ -+#define NH_FLD_MACSEC_SECTAG (1) -+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) -+ -+/*************************** GTP fields ************************************/ -+#define NH_FLD_GTP_TEID (1) -+ -+/* Protocol options */ -+ -+/* Ethernet options */ -+#define NH_OPT_ETH_BROADCAST 1 -+#define NH_OPT_ETH_MULTICAST 2 -+#define NH_OPT_ETH_UNICAST 3 -+#define NH_OPT_ETH_BPDU 4 -+ -+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) -+/* also applicable for broadcast */ -+ -+/* VLAN options */ -+#define NH_OPT_VLAN_CFI 1 -+ -+/* IPV4 options */ -+#define NH_OPT_IPV4_UNICAST 1 -+#define NH_OPT_IPV4_MULTICAST 2 -+#define NH_OPT_IPV4_BROADCAST 3 -+#define NH_OPT_IPV4_OPTION 4 -+#define NH_OPT_IPV4_FRAG 5 -+#define 
NH_OPT_IPV4_INITIAL_FRAG 6 -+ -+/* IPV6 options */ -+#define NH_OPT_IPV6_UNICAST 1 -+#define NH_OPT_IPV6_MULTICAST 2 -+#define NH_OPT_IPV6_OPTION 3 -+#define NH_OPT_IPV6_FRAG 4 -+#define NH_OPT_IPV6_INITIAL_FRAG 5 -+ -+/* General IP options (may be used for any version) */ -+#define NH_OPT_IP_FRAG 1 -+#define NH_OPT_IP_INITIAL_FRAG 2 -+#define NH_OPT_IP_OPTION 3 -+ -+/* Minenc. options */ -+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 -+ -+/* GRE. options */ -+#define NH_OPT_GRE_ROUTING_PRESENT 1 -+ -+/* TCP options */ -+#define NH_OPT_TCP_OPTIONS 1 -+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 -+#define NH_OPT_TCP_CONTROL_LOW_BITS 3 -+ -+/* CAPWAP options */ -+#define NH_OPT_CAPWAP_DTLS 1 -+ -+enum net_prot { -+ NET_PROT_NONE = 0, -+ NET_PROT_PAYLOAD, -+ NET_PROT_ETH, -+ NET_PROT_VLAN, -+ NET_PROT_IPV4, -+ NET_PROT_IPV6, -+ NET_PROT_IP, -+ NET_PROT_TCP, -+ NET_PROT_UDP, -+ NET_PROT_UDP_LITE, -+ NET_PROT_IPHC, -+ NET_PROT_SCTP, -+ NET_PROT_SCTP_CHUNK_DATA, -+ NET_PROT_PPPOE, -+ NET_PROT_PPP, -+ NET_PROT_PPPMUX, -+ NET_PROT_PPPMUX_SUBFRM, -+ NET_PROT_L2TPV2, -+ NET_PROT_L2TPV3_CTRL, -+ NET_PROT_L2TPV3_SESS, -+ NET_PROT_LLC, -+ NET_PROT_LLC_SNAP, -+ NET_PROT_NLPID, -+ NET_PROT_SNAP, -+ NET_PROT_MPLS, -+ NET_PROT_IPSEC_AH, -+ NET_PROT_IPSEC_ESP, -+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ -+ NET_PROT_MACSEC, -+ NET_PROT_GRE, -+ NET_PROT_MINENCAP, -+ NET_PROT_DCCP, -+ NET_PROT_ICMP, -+ NET_PROT_IGMP, -+ NET_PROT_ARP, -+ NET_PROT_CAPWAP_DATA, -+ NET_PROT_CAPWAP_CTRL, -+ NET_PROT_RFC2684, -+ NET_PROT_ICMPV6, -+ NET_PROT_FCOE, -+ NET_PROT_FIP, -+ NET_PROT_ISCSI, -+ NET_PROT_GTP, -+ NET_PROT_USER_DEFINED_L2, -+ NET_PROT_USER_DEFINED_L3, -+ NET_PROT_USER_DEFINED_L4, -+ NET_PROT_USER_DEFINED_L5, -+ NET_PROT_USER_DEFINED_SHIM1, -+ NET_PROT_USER_DEFINED_SHIM2, -+ -+ NET_PROT_DUMMY_LAST -+}; -+ -+/*! IEEE8021.Q */ -+#define NH_IEEE8021Q_ETYPE 0x8100 -+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ -+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ -+ (((uint32_t)(pcp & 0x07)) << 13) | \ -+ (((uint32_t)(dei & 0x01)) << 12) | \ -+ (((uint32_t)(vlan_id & 0xFFF)))) -+ -+#endif /* __FSL_NET_H */ -diff --git a/drivers/net/dpaa2/mc/mc_sys.c b/drivers/net/dpaa2/mc/mc_sys.c -new file mode 100644 -index 0000000..0a88cad ---- /dev/null -+++ b/drivers/net/dpaa2/mc/mc_sys.c -@@ -0,0 +1,129 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+ -+/* ODP framework using MC portal in shared mode. Following -+ changes to introduce Locks must be maintained while -+ merging the FLIB. -+*/ -+ -+/** -+* The mc_spinlock_t type. -+*/ -+typedef struct { -+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ -+} mc_spinlock_t; -+ -+/** -+* A static spinlock initializer. -+*/ -+static mc_spinlock_t mc_portal_lock = { 0 }; -+ -+static inline void mc_pause(void) {} -+ -+static inline void mc_spinlock_lock(mc_spinlock_t *sl) -+{ -+ while (__sync_lock_test_and_set(&sl->locked, 1)) -+ while (sl->locked) -+ mc_pause(); -+} -+ -+static inline void mc_spinlock_unlock(mc_spinlock_t *sl) -+{ -+ __sync_lock_release(&sl->locked); -+} -+ -+ -+static int mc_status_to_error(enum mc_cmd_status status) -+{ -+ switch (status) { -+ case MC_CMD_STATUS_OK: -+ return 0; -+ case MC_CMD_STATUS_AUTH_ERR: -+ return -EACCES; /* Token error */ -+ case MC_CMD_STATUS_NO_PRIVILEGE: -+ return -EPERM; /* Permission denied */ -+ case MC_CMD_STATUS_DMA_ERR: -+ return -EIO; /* Input/Output error */ -+ case MC_CMD_STATUS_CONFIG_ERR: -+ return -EINVAL; /* Device not configured */ -+ case MC_CMD_STATUS_TIMEOUT: -+ return -ETIMEDOUT; /* Operation timed out */ -+ case MC_CMD_STATUS_NO_RESOURCE: -+ return -ENAVAIL; /* Resource temporarily unavailable */ -+ case MC_CMD_STATUS_NO_MEMORY: -+ return -ENOMEM; /* Cannot allocate memory */ -+ case MC_CMD_STATUS_BUSY: -+ return -EBUSY; /* Device busy */ -+ case MC_CMD_STATUS_UNSUPPORTED_OP: -+ return -ENOTSUP; /* Operation not supported by device */ -+ case MC_CMD_STATUS_INVALID_STATE: -+ return -ENODEV; /* Invalid device state */ -+ default: -+ break; -+ } -+ -+ /* Not expected to reach here */ -+ return -EINVAL; -+} -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) -+{ -+ enum mc_cmd_status status; -+ -+ if (!mc_io || !mc_io->regs) -+ return -EACCES; -+ -+ /* --- Call lock function here in case portal is shared --- */ -+ mc_spinlock_lock(&mc_portal_lock); -+ -+ mc_write_command(mc_io->regs, cmd); -+ -+ /* Spin until status changes */ -+ do { -+ status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs)); -+ -+ /* --- Call wait function here to prevent blocking --- -+ * Change the loop condition accordingly to exit on timeout. 
-+ */ -+ } while (status == MC_CMD_STATUS_READY); -+ -+ /* Read the response back into the command buffer */ -+ mc_read_response(mc_io->regs, cmd); -+ -+ /* --- Call unlock function here in case portal is shared --- */ -+ mc_spinlock_unlock(&mc_portal_lock); -+ -+ return mc_status_to_error(status); -+} -+ -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.c b/drivers/net/dpaa2/qbman/driver/qbman_debug.c -new file mode 100644 -index 0000000..e205681 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.c -@@ -0,0 +1,926 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_portal.h" -+#include "qbman_debug.h" -+#include -+ -+/* QBMan portal management command code */ -+#define QBMAN_BP_QUERY 0x32 -+#define QBMAN_FQ_QUERY 0x44 -+#define QBMAN_FQ_QUERY_NP 0x45 -+#define QBMAN_WQ_QUERY 0x47 -+#define QBMAN_CGR_QUERY 0x51 -+#define QBMAN_WRED_QUERY 0x54 -+#define QBMAN_CGR_STAT_QUERY 0x55 -+#define QBMAN_CGR_STAT_QUERY_CLR 0x56 -+ -+enum qbman_attr_usage_e { -+ qbman_attr_usage_fq, -+ qbman_attr_usage_bpool, -+ qbman_attr_usage_cgr, -+ qbman_attr_usage_wqchan -+}; -+ -+struct int_qbman_attr { -+ uint32_t words[32]; -+ enum qbman_attr_usage_e usage; -+}; -+ -+#define attr_type_set(a, e) \ -+{ \ -+ struct qbman_attr *__attr = a; \ -+ enum qbman_attr_usage_e __usage = e; \ -+ ((struct int_qbman_attr *)__attr)->usage = __usage; \ -+} -+ -+#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) -+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) -+ -+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); -+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); -+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); -+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); -+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); -+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); -+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); -+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); -+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); -+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); -+static struct qb_attr_code code_bp_fill = QB_CODE(2 , 0, 32); -+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); -+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 8, 8); -+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 16, 8); -+ -+static void qbman_bp_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_bpool); -+} -+ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_bp_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_bp_bpid, p, bpid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_BP_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); -+ *va = !!qb_attr_code_decode(&code_bp_va, p); -+ *wae = !!qb_attr_code_decode(&code_bp_wae, p); -+} -+ -+static uint32_t qbman_bp_thresh_to_value(uint32_t val) -+{ -+ return (val & 0xff) << ((val & 0xf00) >> 8); -+} -+ -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, -+ p)); -+} -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, -+ p)); -+} -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, -+ p)); -+} -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, -+ p)); -+} -+ -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); -+} -+ -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *icid = qb_attr_code_decode(&code_bp_icid, p); -+ *pl = !!qb_attr_code_decode(&code_bp_pl, p); -+} -+ -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); -+} -+ -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); -+} -+ -+int qbman_bp_info_is_depleted(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); -+} -+ -+int qbman_bp_info_is_surplus(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); -+} -+ -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_fill, p); -+} -+ -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdptr, p); -+} -+ -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ 
-+ return qb_attr_code_decode(&code_bp_sdcnt, p); -+} -+ -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdcnt, p); -+} -+ -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sscnt, p); -+} -+ -+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); -+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); -+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); -+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); -+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); -+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); -+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); -+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); -+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); -+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); -+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); -+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); -+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); -+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); -+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); -+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); -+ -+static void qbman_fq_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_fq); -+} -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *d = ATTR32(desc); -+ -+ qbman_fq_attr_clear(desc); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the WE-mask. -+ * For the query, word[0] of the result contains only the verb/rslt -+ * fields. Skip word[0] in the latter case. 
*/ -+ word_copy(&d[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); -+} -+ -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); -+} -+ -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *destwq = qb_attr_code_decode(&code_fq_destwq, p); -+} -+ -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icscred = qb_attr_code_decode(&code_fq_icscred, p); -+} -+ -+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); -+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); -+static uint32_t qbman_thresh_to_value(uint32_t val) -+{ -+ uint32_t m, e; -+ -+ m = qb_attr_code_decode(&code_tdthresh_mant, &val); -+ e = qb_attr_code_decode(&code_tdthresh_exp, &val); -+ return m << e; -+} -+ -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, -+ p)); -+} -+ -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); -+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); -+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, -+ qb_attr_code_decode(&code_fq_oa_len, p)); -+} -+ -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); -+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); -+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); -+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); -+} -+ -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); -+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); -+} -+ -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icid = qb_attr_code_decode(&code_fq_icid, p); -+ *pl = !!qb_attr_code_decode(&code_fq_pl, p); -+} -+ -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); -+} -+ -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); -+} -+ -+/* Query FQ Non-Programmable Fields */ -+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); -+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); -+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); -+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); -+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); -+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); -+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *d = ATTR32(state); -+ -+ qbman_fq_attr_clear(state); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, 
p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY_NP); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ word_copy(&d[0], &p[0], 16); -+ return 0; -+} -+ -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_state, p); -+} -+ -+int qbman_fq_state_force_eligible(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_fe, p); -+} -+ -+int qbman_fq_state_xoff(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_x, p); -+} -+ -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_r, p); -+} -+ -+int qbman_fq_state_overflow_error(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_oe, p); -+} -+ -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); -+} -+ -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); -+} -+ -+/* Query CGR */ -+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); -+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); -+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); -+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); -+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); -+static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); -+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); -+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); -+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); -+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); -+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); -+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); -+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); -+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code 
code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); -+ -+static void qbman_cgr_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_cgr); -+} -+ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d[2]; -+ int i; -+ uint32_t query_verb; -+ -+ d[0] = ATTR32(attr); -+ d[1] = ATTR32_1(attr); -+ -+ qbman_cgr_attr_clear(attr); -+ -+ for (i = 0; i < 2; i++) { -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the -+ * verb/cgid. For the query, word[0] of the result contains -+ * only the verb/rslt fields. Skip word[0] in the latter case. -+ */ -+ word_copy(&d[i][1], &p[1], 15); -+ } -+ return 0; -+} -+ -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd) -+ { -+ uint32_t *p = ATTR32(d); -+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, -+ p); -+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); -+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); -+} -+ -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi) -+{ -+ uint32_t *p = ATTR32(d); -+ *mode = qb_attr_code_decode(&code_cgr_mode, p); -+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); -+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); -+} -+ -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, -+ p); -+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); -+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); -+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); -+ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p); -+} -+ -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd) -+{ -+ uint32_t *p = ATTR32(d); -+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); -+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); -+} -+ -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); -+} -+ -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres_x, p)); -+} -+ -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_thres = 
qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_td_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); -+} -+ -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); -+} -+ -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); -+} -+ -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); -+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); -+} -+ -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr) -+{ -+ uint32_t *p = ATTR32(d); -+ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, -+ p); -+} -+ -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); -+} -+ -+#define WRED_EDP_WORD(n) (18 + n/4) -+#define WRED_EDP_OFFSET(n) (8 * (n % 4)) -+#define WRED_PARM_DP_WORD(n) (n + 20) -+#define WRED_WE_EDP(n) (16 + n * 2) -+#define WRED_WE_PARM_DP(n) (17 + n * 2) -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), -+ WRED_EDP_OFFSET(idx), 8); -+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); -+} -+ -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp) -+{ -+ uint8_t ma, mn, step_i, step_s, pn; -+ -+ ma = (uint8_t)(dp >> 24); -+ mn = (uint8_t)(dp >> 19) & 0x1f; -+ step_i = (uint8_t)(dp >> 11); -+ step_s = (uint8_t)(dp >> 6) & 0x1f; -+ pn = (uint8_t)dp & 0x3f; -+ -+ *maxp = (uint8_t)(((pn<<2) * 100)/256); -+ -+ if (mn == 0) -+ *maxth = ma; -+ else -+ *maxth = ((ma+256) * (1<<(mn-1))); -+ -+ if (step_s == 0) -+ *minth = *maxth - step_i; -+ else -+ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1)); -+} -+ -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), -+ 0, 8); -+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); -+} -+ -+/* Query CGR/CCGR/CQ statistics */ -+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); -+static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); -+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, -+ int clear, uint32_t command_type, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t query_verb; -+ uint32_t hi, lo; -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ if (command_type < 2) -+ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type); -+ 
query_verb = clear ? -+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query statistics of CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ -+ if (*frame_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); -+ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ if (*byte_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); -+ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ -+ return 0; -+} -+ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 1, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0, -+ frame_cnt, byte_cnt); -+} -+ -+/* WQ Chan Query */ -+static struct qb_attr_code code_wqchan_chanid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_wqchan_cdan_ctx_lo = QB_CODE(2, 0, 32); -+static struct qb_attr_code code_wqchan_cdan_ctx_hi = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_wqchan_cdan_wqid = QB_CODE(1, 16, 16); -+static struct qb_attr_code code_wqchan_ctrl = QB_CODE(1, 8, 8); -+ -+static void qbman_wqchan_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_wqchan); -+} -+ -+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_wqchan_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_wqchan_chanid, p, chanid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQ_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_WQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of WQCHAN 0x%x failed, code=0x%02x\n", -+ chanid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len) -+{ -+ uint32_t *p = ATTR32(attr); -+ struct qb_attr_code code_wqchan_len = QB_CODE(wq+ 8, 0, 24); -+ *len = qb_attr_code_decode(&code_wqchan_len, p); -+} -+ -+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx) -+{ -+ uint32_t lo, hi; -+ uint32_t *p = ATTR32(attr); -+ -+ lo = qb_attr_code_decode(&code_wqchan_cdan_ctx_lo, p); -+ hi = qb_attr_code_decode(&code_wqchan_cdan_ctx_hi, p); -+ *cdan_ctx = ((uint64_t)hi << 32) | (uint64_t)lo; -+} -+ -+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr, -+ uint16_t *cdan_wqid) -+{ -+ uint32_t *p = ATTR32(attr); -+ *cdan_wqid = (uint16_t)qb_attr_code_decode(&code_wqchan_cdan_wqid, p); -+} -+ -+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl) -+{ -+ uint32_t *p = ATTR32(attr); -+ *ctrl = (uint8_t)qb_attr_code_decode(&code_wqchan_ctrl, p); -+} -+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid) -+{ -+ uint32_t *p = ATTR32(attr); -+ *chanid = (uint16_t)qb_attr_code_decode(&code_wqchan_chanid, p); -+} -+ -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.h b/drivers/net/dpaa2/qbman/driver/qbman_debug.h -new file mode 100644 -index 0000000..8c89731 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.h -@@ -0,0 +1,140 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+struct qbman_attr { -+ uint32_t dont_manipulate_directly[40]; -+}; -+ -+/* Buffer pool query commands */ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a); -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a); -+int qbman_bp_info_is_depleted(struct qbman_attr *a); -+int qbman_bp_info_is_surplus(struct qbman_attr *a); -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *desc); -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len); -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps); -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); -+ -+/* FQ query command for non-programmable fields*/ -+enum qbman_fq_schedstate_e { -+ qbman_fq_schedstate_oos = 0, -+ qbman_fq_schedstate_retired, -+ qbman_fq_schedstate_tentatively_scheduled, -+ qbman_fq_schedstate_truly_scheduled, -+ qbman_fq_schedstate_parked, -+ qbman_fq_schedstate_held_active, -+}; -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state); -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); -+int qbman_fq_state_force_eligible(const struct qbman_attr *state); -+int qbman_fq_state_xoff(const struct qbman_attr *state); -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state); -+int qbman_fq_state_overflow_error(const struct qbman_attr *state); -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); -+ -+/* CGR query */ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, -+ struct qbman_attr *attr); -+void 
qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd); -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi); -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va); -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd); -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x); -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid); -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl); -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr); -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp); -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp); -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp); -+ -+/* CGR/CCGR/CQ statistics query */ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+ -+/* Query Work Queue Channel */ -+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, -+ struct qbman_attr *attr); -+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len); -+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx); -+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr, -+ uint16_t *cdan_wqid); -+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl); -+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid); -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.c b/drivers/net/dpaa2/qbman/driver/qbman_portal.c -new file mode 100644 -index 0000000..464f386 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.c -@@ -0,0 +1,1407 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_portal.h" -+ -+/* QBMan portal management command codes */ -+#define QBMAN_MC_ACQUIRE 0x30 -+#define QBMAN_WQCHAN_CONFIGURE 0x46 -+ -+/* CINH register offsets */ -+#define QBMAN_CINH_SWP_EQCR_PI 0x800 -+#define QBMAN_CINH_SWP_EQCR_CI 0x840 -+#define QBMAN_CINH_SWP_EQAR 0x8c0 -+#define QBMAN_CINH_SWP_DQPI 0xa00 -+#define QBMAN_CINH_SWP_DCAP 0xac0 -+#define QBMAN_CINH_SWP_SDQCR 0xb00 -+#define QBMAN_CINH_SWP_RAR 0xcc0 -+#define QBMAN_CINH_SWP_ISR 0xe00 -+#define QBMAN_CINH_SWP_IER 0xe40 -+#define QBMAN_CINH_SWP_ISDR 0xe80 -+#define QBMAN_CINH_SWP_IIR 0xec0 -+ -+/* CENA register offsets */ -+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_CR 0x600 -+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) -+#define QBMAN_CENA_SWP_VDQCR 0x780 -+#define QBMAN_CENA_SWP_EQCR_CI 0x840 -+ -+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ -+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) -+ -+/* QBMan FQ management command codes */ -+#define QBMAN_FQ_SCHEDULE 0x48 -+#define QBMAN_FQ_FORCE 0x49 -+#define QBMAN_FQ_XON 0x4d -+#define QBMAN_FQ_XOFF 0x4e -+ -+/*******************************/ -+/* Pre-defined attribute codes */ -+/*******************************/ -+ -+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); -+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); -+ -+/*************************/ -+/* SDQCR attribute codes */ -+/*************************/ -+ -+/* we put these here because at least some of them are required by -+ * qbman_swp_init() */ -+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); -+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); -+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); -+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) -+enum qbman_sdqcr_dct { -+ qbman_sdqcr_dct_null = 0, -+ qbman_sdqcr_dct_prio_ics, -+ qbman_sdqcr_dct_active_ics, -+ qbman_sdqcr_dct_active -+}; -+enum qbman_sdqcr_fc { -+ qbman_sdqcr_fc_one = 0, -+ qbman_sdqcr_fc_up_to_3 = 1 -+}; -+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); -+ -+/*********************************/ -+/* Portal constructor/destructor */ -+/*********************************/ -+ -+/* Software portals should always be in the power-on state when we initialise, -+ * due to the CCSR-based portal reset functionality that MC has. -+ * -+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR -+ * valid-bits, so we need to support a workaround where we don't trust -+ * valid-bits when detecting new entries until any stale ring entries have been -+ * overwritten at least once. 
The idea is that we read PI for the first few -+ * entries, then switch to valid-bit after that. The trick is to clear the -+ * bug-work-around boolean once the PI wraps around the ring for the first time. -+ * -+ * Note: this still carries a slight additional cost once the decrementer hits -+ * zero. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) -+{ -+ int ret; -+ uint32_t eqcr_pi; -+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); -+ if (!p) -+ return NULL; -+ p->desc = d; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit = QB_VALID_BIT; -+ p->sdq = 0; -+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); -+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); -+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); -+ atomic_set(&p->vdq.busy, 1); -+ p->vdq.valid_bit = QB_VALID_BIT; -+ p->dqrr.next_idx = 0; -+ p->dqrr.valid_bit = QB_VALID_BIT; -+ qman_version = p->desc->qman_version; -+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) { -+ p->dqrr.dqrr_size = 4; -+ p->dqrr.reset_bug = 1; -+ } else { -+ p->dqrr.dqrr_size = 8; -+ p->dqrr.reset_bug = 0; -+ } -+ -+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); -+ if (ret) { -+ kfree(p); -+ pr_err("qbman_swp_sys_init() failed %d\n", ret); -+ return NULL; -+ } -+ /* SDQCR needs to be initialized to 0 when no channels are -+ being dequeued from or else the QMan HW will indicate an -+ error. The values that were calculated above will be -+ applied when dequeues from a specific channel are enabled */ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); -+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI); -+ p->eqcr.pi = eqcr_pi & 0xF; -+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT; -+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF; -+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE, -+ p->eqcr.ci, p->eqcr.pi); -+ -+ return p; -+} -+ -+void qbman_swp_finish(struct qbman_swp *p) -+{ -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ qbman_swp_sys_finish(&p->sys); -+ kfree(p); -+} -+ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) -+{ -+ return p->desc; -+} -+ -+/**************/ -+/* Interrupts */ -+/**************/ -+ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); -+} -+ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); -+} -+ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); -+} -+ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); -+} -+ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); -+} -+ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 
0xffffffff : 0); -+} -+ -+/***********************/ -+/* Management commands */ -+/***********************/ -+ -+/* -+ * Internal code common to all types of management commands. -+ */ -+ -+void *qbman_swp_mc_start(struct qbman_swp *p) -+{ -+ void *ret; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); -+#ifdef QBMAN_CHECKING -+ if (!ret) -+ p->mc.check = swp_mc_can_submit; -+#endif -+ return ret; -+} -+ -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) -+{ -+ uint32_t *v = cmd; -+#ifdef QBMAN_CHECKING -+ BUG_ON(!p->mc.check != swp_mc_can_submit); -+#endif -+ /* TBD: "|=" is going to hurt performance. Need to move as many fields -+ * out of word zero, and for those that remain, the "OR" needs to occur -+ * at the caller side. This debug check helps to catch cases where the -+ * caller wants to OR but has forgotten to do so. */ -+ BUG_ON((*v & cmd_verb) != *v); -+ *v = cmd_verb | p->mc.valid_bit; -+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_poll; -+#endif -+} -+ -+void *qbman_swp_mc_result(struct qbman_swp *p) -+{ -+ uint32_t *ret, verb; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_poll); -+#endif -+ qbman_cena_invalidate_prefetch(&p->sys, -+ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ /* Remove the valid-bit - command completed iff the rest is non-zero */ -+ verb = ret[0] & ~QB_VALID_BIT; -+ if (!verb) -+ return NULL; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit ^= QB_VALID_BIT; -+ return ret; -+} -+ -+/***********/ -+/* Enqueue */ -+/***********/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); -+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); -+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); -+static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); -+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1); -+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); -+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); -+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); -+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); -+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ -+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); -+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); -+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); -+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); -+ -+enum qbman_eq_cmd_e { -+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ -+ qbman_eq_cmd_empty, -+ /* DMA an enqueue response once complete */ -+ qbman_eq_cmd_respond, -+ /* DMA an enqueue response only if the enqueue fails */ -+ qbman_eq_cmd_respond_reject -+}; -+ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_orp_en, cl, 0); -+ 
qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+} -+ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); -+} -+ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); -+} -+ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); -+} -+ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); -+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); -+} -+ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); -+} -+ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_qd_en, cl, 0); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); -+} -+ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_qd_en, cl, 1); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); -+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); -+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); -+} -+ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); -+} -+ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); -+ if (enable) { -+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); -+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); -+ } -+} -+ -+#define EQAR_IDX(eqar) ((eqar) & 0x7) -+#define EQAR_VB(eqar) ((eqar) & 0x80) -+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) -+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); -+ pr_debug("EQAR=%08x\n", eqar); -+ if (!EQAR_SUCCESS(eqar)) -+ return -EBUSY; -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ 
QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ lwsync(); -+ p[0] = cl[0] | EQAR_VB(eqar); -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ return 0; -+} -+ -+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ s->eqcr.available += diff; -+ if (!diff) -+ return -EBUSY; -+ } -+ -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7)); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ lwsync(); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7)); -+ s->eqcr.pi++; -+ s->eqcr.pi &= 0xF; -+ s->eqcr.available--; -+ if (!(s->eqcr.pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ return 0; -+} -+ -+int qbman_swp_fill_ring(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ __attribute__((unused)) uint8_t burst_index) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ s->eqcr.available += diff; -+ if (!diff) { -+ return -EBUSY; -+ } -+ } -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7)); -+ //word_copy(&p[1], &cl[1], 7); -+ memcpy(&p[1], &cl[1], 7); -+ /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */ -+ memcpy(&p[8], fd, sizeof(struct qbman_fd)); -+ -+ //lwsync(); -+ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ -+ s->eqcr.pi++; -+ s->eqcr.pi &= 0xF; -+ s->eqcr.available--; -+ if (!(s->eqcr.pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ -+ return 0; -+} -+ -+int qbman_swp_flush_ring(struct qbman_swp *s) -+{ -+ void *ptr = s->sys.addr_cena; -+ dcbf((uint64_t)ptr); -+ dcbf((uint64_t)ptr + 0x40); -+ dcbf((uint64_t)ptr + 0x80); -+ dcbf((uint64_t)ptr + 0xc0); -+ dcbf((uint64_t)ptr + 0x100); -+ dcbf((uint64_t)ptr + 0x140); -+ dcbf((uint64_t)ptr + 0x180); -+ dcbf((uint64_t)ptr + 0x1c0); -+ -+ return 0; -+} -+ -+void qbman_sync(void) -+{ -+ lwsync(); -+} -+ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ if (s->sys.eqcr_mode == qman_eqcr_vb_array) -+ return qbman_swp_enqueue_array_mode(s, d, fd); -+ else /* Use ring mode by default */ -+ return qbman_swp_enqueue_ring_mode(s, d, fd); -+} -+ -+/*************************/ -+/* Static (push) dequeue */ -+/*************************/ -+ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) -+{ -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); -+} -+ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) -+{ -+ uint16_t dqsrc; -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ BUG_ON(channel_idx > 15); 
-+ qb_attr_code_encode(&code, &s->sdq, !!enable); -+ /* Read make the complete src map. If no channels are enabled -+ the SDQCR must be 0 or else QMan will assert errors */ -+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); -+ if (dqsrc != 0) -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); -+ else -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); -+} -+ -+/***************************/ -+/* Volatile (pull) dequeue */ -+/***************************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); -+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); -+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); -+ -+enum qb_pull_dt_e { -+ qb_pull_dt_channel, -+ qb_pull_dt_workqueue, -+ qb_pull_dt_framequeue -+}; -+ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct qbman_result *storage, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ /* Squiggle the pointer 'storage' into the extra 2 words of the -+ * descriptor (which aren't copied to the hw command) */ -+ *(void **)&cl[4] = storage; -+ if (!storage) { -+ qb_attr_code_encode(&code_pull_rls, cl, 0); -+ return; -+ } -+ qb_attr_code_encode(&code_pull_rls, cl, 1); -+ qb_attr_code_encode(&code_pull_stash, cl, !!stash); -+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); -+} -+ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) -+{ -+ uint32_t *cl = qb_cl(d); -+ BUG_ON(!numframes || (numframes > 16)); -+ qb_attr_code_encode(&code_pull_numframes, cl, -+ (uint32_t)(numframes - 1)); -+} -+ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_pull_token, cl, token); -+} -+ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_pull_dct, cl, 1); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); -+} -+ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); -+} -+ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); -+ qb_attr_code_encode(&code_pull_dqsource, cl, chid); -+} -+ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) -+{ -+ uint32_t *p; -+ uint32_t *cl = qb_cl(d); -+ if (!atomic_dec_and_test(&s->vdq.busy)) { -+ atomic_inc(&s->vdq.busy); -+ return -EBUSY; -+ } -+ s->vdq.storage = *(void **)&cl[4]; -+ qb_attr_code_encode(&code_pull_token, cl, 1); -+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ 
word_copy(&p[1], &cl[1], 3); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ lwsync(); -+ p[0] = cl[0] | s->vdq.valid_bit; -+ s->vdq.valid_bit ^= QB_VALID_BIT; -+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ return 0; -+} -+ -+/****************/ -+/* Polling DQRR */ -+/****************/ -+ -+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); -+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7); -+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); -+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); -+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ -+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); -+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); -+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); -+ -+#define QBMAN_RESULT_DQ 0x60 -+#define QBMAN_RESULT_FQRN 0x21 -+#define QBMAN_RESULT_FQRNI 0x22 -+#define QBMAN_RESULT_FQPN 0x24 -+#define QBMAN_RESULT_FQDAN 0x25 -+#define QBMAN_RESULT_CDAN 0x26 -+#define QBMAN_RESULT_CSCN_MEM 0x27 -+#define QBMAN_RESULT_CGCU 0x28 -+#define QBMAN_RESULT_BPSCN 0x29 -+#define QBMAN_RESULT_CSCN_WQ 0x2a -+ -+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); -+ -+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. */ -+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s) -+{ -+ uint32_t verb; -+ uint32_t response_verb; -+ uint32_t flags; -+ const struct qbman_result *dq; -+ const uint32_t *p; -+ -+ /* Before using valid-bit to detect if something is there, we have to -+ * handle the case of the DQRR reset bug... */ -+ if (unlikely(s->dqrr.reset_bug)) { -+ /* We pick up new entries by cache-inhibited producer index, -+ * which means that a non-coherent mapping would require us to -+ * invalidate and read *only* once that PI has indicated that -+ * there's an entry here. The first trip around the DQRR ring -+ * will be much less efficient than all subsequent trips around -+ * it... -+ */ -+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); -+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); -+ /* there are new entries iff pi != next_idx */ -+ if (pi == s->dqrr.next_idx) -+ return NULL; -+ /* if next_idx is/was the last ring index, and 'pi' is -+ * different, we can disable the workaround as all the ring -+ * entries have now been DMA'd to so valid-bit checking is -+ * repaired. Note: this logic needs to be based on next_idx -+ * (which increments one at a time), rather than on pi (which -+ * can burst and wrap-around between our snapshots of it). -+ */ -+ BUG_ON((s->dqrr.dqrr_size - 1) < 0); -+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) { -+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", -+ s->dqrr.next_idx, pi); -+ s->dqrr.reset_bug = 0; -+ } -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ } -+ dq = qbman_cena_read_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ p = qb_cl(dq); -+ verb = qb_attr_code_decode(&code_dqrr_verb, p); -+ /* If the valid-bit isn't of the expected polarity, nothing there. 
Note, -+ * in the DQRR reset bug workaround, we shouldn't need to skip these -+ * check, because we've already determined that a new entry is available -+ * and we've invalidated the cacheline before reading it, so the -+ * valid-bit behaviour is repaired and should tell us what we already -+ * knew from reading PI. -+ */ -+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) -+ return NULL; -+ -+ /* There's something there. Move "next_idx" attention to the next ring -+ * entry (and prefetch it) before returning what we found. */ -+ s->dqrr.next_idx++; -+ if (s->dqrr.next_idx == QBMAN_DQRR_SIZE) { -+ s->dqrr.next_idx = 0; -+ s->dqrr.valid_bit ^= QB_VALID_BIT; -+ } -+ /* If this is the final response to a volatile dequeue command -+ indicate that the vdq is no longer busy */ -+ flags = qbman_result_DQ_flags(dq); -+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); -+ if ((response_verb == QBMAN_RESULT_DQ) && -+ (flags & QBMAN_DQ_STAT_VOLATILE) && -+ (flags & QBMAN_DQ_STAT_EXPIRED)) -+ atomic_inc(&s->vdq.busy); -+ -+ return dq; -+} -+ -+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); -+} -+ -+/*********************************/ -+/* Polling user-provided storage */ -+/*********************************/ -+ -+int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. */ -+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); -+ -+ /* Only now do we convert from hardware to host endianness. Also, as we -+ * are returning success, the user has promised not to call us again, so -+ * there's no risk of us converting the endianness twice... */ -+ make_le32_n(p, 16); -+ return 1; -+} -+ -+int qbman_check_command_complete(struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. 
It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. */ -+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if(token!=1) -+ return 0; -+ /*When token is set it indicates that VDQ command has been fetched by qbman and -+ *is working on it. It is safe for software to issue another VDQ command, so -+ *incrementing the busy variable.*/ -+ if (s->vdq.storage == dq) { -+ s->vdq.storage = NULL; -+ atomic_inc(&s->vdq.busy); -+ } -+ return 1; -+} -+ -+/********************************/ -+/* Categorising qbman results */ -+/********************************/ -+ -+static struct qb_attr_code code_result_in_mem = -+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); -+ -+static inline int __qbman_result_is_x(const struct qbman_result *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); -+ return (response_verb == x); -+} -+ -+static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); -+ -+ return (response_verb == x); -+} -+ -+int qbman_result_is_DQ(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); -+} -+ -+int qbman_result_is_FQDAN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); -+} -+ -+int qbman_result_is_CDAN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); -+} -+ -+int qbman_result_is_CSCN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || -+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); -+} -+ -+int qbman_result_is_BPSCN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); -+} -+ -+int qbman_result_is_CGCU(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); -+} -+ -+int qbman_result_is_FQRN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); -+} -+ -+int qbman_result_is_FQRNI(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); -+} -+ -+int qbman_result_is_FQPN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); -+} -+ -+/*********************************/ -+/* Parsing frame dequeue results */ -+/*********************************/ -+ -+/* These APIs assume qbman_result_is_DQ() is TRUE */ -+ -+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return qb_attr_code_decode(&code_dqrr_stat, p); -+} -+ -+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); -+} -+ -+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); -+} -+ -+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return qb_attr_code_decode(&code_dqrr_fqid, p); -+} -+ -+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return 
qb_attr_code_decode(&code_dqrr_byte_count, p); -+} -+ -+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return qb_attr_code_decode(&code_dqrr_frame_count, p); -+} -+ -+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq) -+{ -+ const uint64_t *p = (const uint64_t *)qb_cl(dq); -+ -+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); -+} -+ -+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ return (const struct qbman_fd *)&p[8]; -+} -+ -+/**************************************/ -+/* Parsing state-change notifications */ -+/**************************************/ -+ -+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_scn_state_in_mem = -+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); -+static struct qb_attr_code code_scn_rid_in_mem = -+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); -+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); -+ -+uint8_t qbman_result_SCN_state(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); -+} -+ -+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ return qb_attr_code_decode(&code_scn_rid, p); -+} -+ -+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn) -+{ -+ const uint64_t *p = (const uint64_t *)qb_cl(scn); -+ -+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); -+} -+ -+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); -+} -+ -+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ uint32_t result_rid; -+ -+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); -+ return make_le24(result_rid); -+} -+ -+/*****************/ -+/* Parsing BPSCN */ -+/*****************/ -+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; -+} -+ -+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn) -+{ -+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); -+} -+ -+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); -+} -+ -+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); -+} -+ -+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn) -+{ -+ uint64_t ctx; -+ uint32_t ctx_hi, ctx_lo; -+ -+ ctx = qbman_result_SCN_ctx(scn); -+ ctx_hi = upper32(ctx); -+ ctx_lo = lower32(ctx); -+ return ((uint64_t)make_le32(ctx_hi) << 32 | -+ (uint64_t)make_le32(ctx_lo)); -+} -+ -+/*****************/ -+/* Parsing CGCU */ -+/*****************/ -+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; -+} -+ -+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn) -+{ -+ uint64_t ctx; -+ uint32_t ctx_hi, ctx_lo; -+ -+ ctx = qbman_result_SCN_ctx(scn); -+ ctx_hi = upper32(ctx); -+ ctx_lo = lower32(ctx); -+ return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) | -+ (uint64_t)make_le32(ctx_lo); -+} -+ -+/******************/ -+/* Buffer release */ -+/******************/ -+ -+/* These should 
be const, eventually */ -+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ -+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); -+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); -+ -+void qbman_release_desc_clear(struct qbman_release_desc *d) -+{ -+ uint32_t *cl; -+ memset(d, 0, sizeof(*d)); -+ cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_set_me, cl, 1); -+} -+ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_bpid, cl, bpid); -+} -+ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); -+} -+ -+#define RAR_IDX(rar) ((rar) & 0x7) -+#define RAR_VB(rar) ((rar) & 0x80) -+#define RAR_SUCCESS(rar) ((rar) & 0x100) -+ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); -+ pr_debug("RAR=%08x\n", rar); -+ if (!RAR_SUCCESS(rar)) -+ return -EBUSY; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ /* Start the release command */ -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ /* Copy the caller's buffer pointers to the command */ -+ u64_to_le32_copy(&p[2], buffers, num_buffers); -+ /* Set the verb byte, have to substitute in the valid-bit and the number -+ * of buffers. */ -+ lwsync(); -+ p[0] = cl[0] | RAR_VB(rar) | num_buffers; -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ return 0; -+} -+ -+/*******************/ -+/* Buffer acquires */ -+/*******************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); -+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); -+ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ uint32_t *p; -+ uint32_t rslt, num; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_acquire_bpid, p, bpid); -+ qb_attr_code_encode(&code_acquire_num, p, num_buffers); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ num = qb_attr_code_decode(&code_acquire_r_num, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_MC_ACQUIRE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", -+ bpid, rslt); -+ return -EIO; -+ } -+ BUG_ON(num > num_buffers); -+ /* Copy the acquired buffers to the caller's array */ -+ u64_from_le32_copy(buffers, &p[2], num); -+ return (int)num; -+} -+ -+/*****************/ -+/* FQ management */ -+/*****************/ -+ -+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); -+ -+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, -+ uint8_t alt_fq_verb) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ -+ 
/* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", -+ fqid, alt_fq_verb, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); -+} -+ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); -+} -+ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); -+} -+ -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); -+} -+ -+/**********************/ -+/* Channel management */ -+/**********************/ -+ -+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); -+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); -+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); -+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); -+ -+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it -+ * would be irresponsible to expose it. */ -+#define CODE_CDAN_WE_EN 0x1 -+#define CODE_CDAN_WE_CTX 0x4 -+ -+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, -+ uint8_t we_mask, uint8_t cdan_en, -+ uint64_t ctx) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_cdan_cid, p, channelid); -+ qb_attr_code_encode(&code_cdan_we, p, we_mask); -+ qb_attr_code_encode(&code_cdan_en, p, cdan_en); -+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) -+ != QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("CDAN cQID %d failed: code = 0x%02x\n", -+ channelid, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_CTX, -+ 0, ctx); -+} -+ -+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 1, 0); -+} -+ -+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 0, 0); -+} -+ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, -+ 1, ctx); -+} -+ -+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr) -+{ -+ return QBMAN_IDX_FROM_DQRR(dqrr); -+} -+ -+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp 
*s, uint8_t idx) -+{ -+ struct qbman_result *dq; -+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx)); -+ return dq; -+} -+ -+int qbman_swp_send_multiple(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ int frames_to_send) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ int sent = 0; -+ int i; -+ int initial_pi = s->eqcr.pi; -+ uint64_t start_pointer; -+ -+ -+ /* we are trying to send frames_to_send if we have enough space in the ring */ -+ while(frames_to_send--) -+ { -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ s->eqcr.available += diff; -+ if (!diff) -+ { -+ goto done; -+ } -+ } -+ -+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys, -+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7)); -+ /* Write command (except of first byte) and FD */ -+ memcpy(&p[1], &cl[1], 7); -+ memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd)); -+ -+ initial_pi++; -+ initial_pi &= 0xF; -+ s->eqcr.available--; -+ sent++; -+ -+ } -+ -+ done: -+ initial_pi = s->eqcr.pi; -+ lwsync(); -+ -+ /* in order for flushes to complete faster */ -+ /*For that we use a following trick: we record all lines in 32 bit word */ -+ -+ initial_pi = s->eqcr.pi; -+ for(i = 0; i < sent; i++) -+ { -+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys, -+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7)); -+ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ initial_pi++; -+ initial_pi &= 0xF; -+ -+ if (!(initial_pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ -+ } -+ -+ initial_pi = s->eqcr.pi; -+ -+ /* We need to flush all the lines but without load/store operations between them */ -+ /* We assign start_pointer before we start loop so that in loop we do not read it from memory */ -+ start_pointer = (uint64_t) s->sys.addr_cena; -+ for(i = 0; i < sent; i++) -+ { -+ p = (uint32_t *)(start_pointer + QBMAN_CENA_SWP_EQCR(initial_pi & 7)); -+ dcbf((uint64_t)p); -+ initial_pi++; -+ initial_pi &= 0xF; -+ } -+ -+ /* Update producer index for the next call */ -+ s->eqcr.pi = initial_pi; -+ -+ return sent; -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.h b/drivers/net/dpaa2/qbman/driver/qbman_portal.h -new file mode 100644 -index 0000000..f6ba86a ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.h -@@ -0,0 +1,266 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_private.h" -+#include -+ -+uint32_t qman_version; -+/* All QBMan command and result structures use this "valid bit" encoding */ -+#define QB_VALID_BIT ((uint32_t)0x80) -+ -+/* Management command result codes */ -+#define QBMAN_MC_RSLT_OK 0xf0 -+ -+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ -+#define QBMAN_DQRR_SIZE 4 -+ -+#define QBMAN_EQCR_SIZE 8 -+ -+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) -+{ -+ /* 'first' is included, 'last' is excluded */ -+ if (first <= last) -+ return last - first; -+ return (2 * ringsize) + last - first; -+} -+ -+/* --------------------- */ -+/* portal data structure */ -+/* --------------------- */ -+ -+struct qbman_swp { -+ const struct qbman_swp_desc *desc; -+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it -+ * needs in here. */ -+ struct qbman_swp_sys sys; -+ /* Management commands */ -+ struct { -+#ifdef QBMAN_CHECKING -+ enum swp_mc_check { -+ swp_mc_can_start, /* call __qbman_swp_mc_start() */ -+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ -+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ -+ } check; -+#endif -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ } mc; -+ /* Push dequeues */ -+ uint32_t sdq; -+ /* Volatile dequeues */ -+ struct { -+ /* VDQCR supports a "1 deep pipeline", meaning that if you know -+ * the last-submitted command is already executing in the -+ * hardware (as evidenced by at least 1 valid dequeue result), -+ * you can write another dequeue command to the register, the -+ * hardware will start executing it as soon as the -+ * already-executing command terminates. (This minimises latency -+ * and stalls.) With that in mind, this "busy" variable refers -+ * to whether or not a command can be submitted, not whether or -+ * not a previously-submitted command is still executing. In -+ * other words, once proof is seen that the previously-submitted -+ * command is executing, "vdq" is no longer "busy". */ -+ atomic_t busy; -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ /* We need to determine when vdq is no longer busy. This depends -+ * on whether the "busy" (last-submitted) dequeue command is -+ * targetting DQRR or main-memory, and detected is based on the -+ * presence of the dequeue command's "token" showing up in -+ * dequeue entries in DQRR or main-memory (respectively). */ -+ struct qbman_result *storage; /* NULL if DQRR */ -+ } vdq; -+ /* DQRR */ -+ struct { -+ uint32_t next_idx; -+ uint32_t valid_bit; -+ uint8_t dqrr_size; -+ int reset_bug; -+ } dqrr; -+ struct { -+ uint32_t pi; -+ uint32_t pi_vb; -+ uint32_t ci; -+ int available; -+ } eqcr; -+}; -+ -+/* -------------------------- */ -+/* portal management commands */ -+/* -------------------------- */ -+ -+/* Different management commands all use this common base layer of code to issue -+ * commands and poll for results. 
The first function returns a pointer to where -+ * the caller should fill in their MC command (though they should ignore the -+ * verb byte), the second function commits merges in the caller-supplied command -+ * verb (which should not include the valid-bit) and submits the command to -+ * hardware, and the third function checks for a completed response (returns -+ * non-NULL if only if the response is complete). */ -+void *qbman_swp_mc_start(struct qbman_swp *p); -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); -+void *qbman_swp_mc_result(struct qbman_swp *p); -+ -+/* Wraps up submit + poll-for-result */ -+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, -+ uint32_t cmd_verb) -+{ -+ int loopvar; -+ qbman_swp_mc_submit(swp, cmd, cmd_verb); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ cmd = qbman_swp_mc_result(swp); -+ } while (!cmd); -+ return cmd; -+} -+ -+/* ------------ */ -+/* qb_attr_code */ -+/* ------------ */ -+ -+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which -+ * is either serving as a configuration command or a query result. The -+ * representation is inherently little-endian, as the indexing of the words is -+ * itself little-endian in nature and DPAA2 QBMan is little endian for anything -+ * that crosses a word boundary too (64-bit fields are the obvious examples). -+ */ -+struct qb_attr_code { -+ unsigned int word; /* which uint32_t[] array member encodes the field */ -+ unsigned int lsoffset; /* encoding offset from ls-bit */ -+ unsigned int width; /* encoding width. (bool must be 1.) */ -+}; -+ -+/* Some pre-defined codes */ -+extern struct qb_attr_code code_generic_verb; -+extern struct qb_attr_code code_generic_rslt; -+ -+/* Macros to define codes */ -+#define QB_CODE(a, b, c) { a, b, c} -+#define QB_CODE_NULL \ -+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) -+ -+/* Rotate a code "ms", meaning that it moves from less-significant bytes to -+ * more-significant, from less-significant words to more-significant, etc. The -+ * "ls" version does the inverse, from more-significant towards -+ * less-significant. -+ */ -+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ code->lsoffset += bits; -+ while (code->lsoffset > 31) { -+ code->word++; -+ code->lsoffset -= 32; -+ } -+} -+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ /* Don't be fooled, this trick should work because the types are -+ * unsigned. So the case that interests the while loop (the rotate has -+ * gone too far and the word count needs to compensate for it), is -+ * manifested when lsoffset is negative. But that equates to a really -+ * large unsigned value, starting with lots of "F"s. As such, we can -+ * continue adding 32 back to it until it wraps back round above zero, -+ * to a value of 31 or less... -+ */ -+ code->lsoffset -= bits; -+ while (code->lsoffset > 31) { -+ code->word--; -+ code->lsoffset += 32; -+ } -+} -+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). 
*/ -+#define qb_attr_code_for_ms(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ms(code, bits)) -+#define qb_attr_code_for_ls(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ls(code, bits)) -+ -+/* decode a field from a cacheline */ -+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, -+ const uint32_t *cacheline) -+{ -+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); -+} -+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, -+ const uint64_t *cacheline) -+{ -+ return cacheline[code->word / 2]; -+} -+ -+/* encode a field to a cacheline */ -+static inline void qb_attr_code_encode(const struct qb_attr_code *code, -+ uint32_t *cacheline, uint32_t val) -+{ -+ cacheline[code->word] = -+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) -+ | e32_uint32_t(code->lsoffset, code->width, val); -+} -+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code, -+ uint64_t *cacheline, uint64_t val) -+{ -+ cacheline[code->word / 2] = val; -+} -+ -+/* Small-width signed values (two's-complement) will decode into medium-width -+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to -+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value -+ * 249. Likewise -120 would decode as 136.) This function allows the caller to -+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit -+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). -+ */ -+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, -+ uint32_t val) -+{ -+ BUG_ON(val >= (1u << code->width)); -+ /* code->width should never exceed the width of val. If it does then a -+ * different function with larger val size must be used to translate -+ * from unsigned to signed */ -+ BUG_ON(code->width > sizeof(val) * CHAR_BIT); -+ /* If the high bit was set, it was encoding a negative */ -+ if (val >= 1u << (code->width - 1)) -+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - -+ val); -+ /* Otherwise, it was encoding a positive */ -+ return (int32_t)val; -+} -+ -+/* ---------------------- */ -+/* Descriptors/cachelines */ -+/* ---------------------- */ -+ -+/* To avoid needless dynamic allocation, the driver API often gives the caller -+ * a "descriptor" type that the caller can instantiate however they like. -+ * Ultimately though, it is just a cacheline of binary storage (or something -+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for -+ * holding pre-formatted pieces of harware commands. The performance-critical -+ * code can then copy these descriptors directly into hardware command -+ * registers more efficiently than trying to construct/format commands -+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in -+ * order for the compiler to know its size, but the internal details are not -+ * exposed. The following macro is used within the driver for converting *any* -+ * descriptor pointer to a usable array pointer. The use of a macro (instead of -+ * an inline) is necessary to work with different descriptor types and to work -+ * correctly with const and non-const inputs (and similarly-qualified outputs). 
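/*
 * Illustrative sketch only (the example_* name is assumed, not part of the
 * driver): building an enqueue descriptor on the caller's stack and pushing
 * one frame through a software portal, using the descriptor APIs defined in
 * qbman_portal.c. 'swp', 'fd' and 'fqid' are assumed to come from the
 * caller's own portal and buffer setup.
 */
static inline int example_enqueue_one(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      uint32_t fqid)
{
	struct qbman_eq_desc ed;
	int ret;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);	/* no ORP; respond only on rejection */
	qbman_eq_desc_set_fq(&ed, fqid);	/* enqueue straight to a frame queue */
	do {
		ret = qbman_swp_enqueue(swp, &ed, fd);
	} while (ret == -EBUSY);		/* EQCR full: retry until space frees up */
	return ret;
}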
-+ */ -+#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_private.h b/drivers/net/dpaa2/qbman/driver/qbman_private.h -new file mode 100644 -index 0000000..4e50b61 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_private.h -@@ -0,0 +1,165 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*/ -+ -+/* Perform extra checking */ -+#define QBMAN_CHECKING -+ -+/* To maximise the amount of logic that is common between the Linux driver and -+ * other targets (such as the embedded MC firmware), we pivot here between the -+ * inclusion of two platform-specific headers. -+ * -+ * The first, qbman_sys_decl.h, includes any and all required system headers as -+ * well as providing any definitions for the purposes of compatibility. The -+ * second, qbman_sys.h, is where platform-specific routines go. -+ * -+ * The point of the split is that the platform-independent code (including this -+ * header) may depend on platform-specific declarations, yet other -+ * platform-specific routines may depend on platform-independent definitions. -+ */ -+ -+#include "qbman_sys_decl.h" -+ -+/* When things go wrong, it is a convenient trick to insert a few FOO() -+ * statements in the code to trace progress. TODO: remove this once we are -+ * hacking the code less actively. -+ */ -+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) -+ -+/* Any time there is a register interface which we poll on, this provides a -+ * "break after x iterations" scheme for it. It's handy for debugging, eg. -+ * where you don't want millions of lines of log output from a polling loop -+ * that won't, because such things tend to drown out the earlier log output -+ * that might explain what caused the problem. (NB: put ";" after each macro!) -+ * TODO: we should probably remove this once we're done sanitising the -+ * simulator... 
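/*
 * Sketch of the intended pattern (mirrors qbman_swp_mc_complete() in
 * qbman_portal.h; the example_* wrapper name is assumed): bound a
 * poll-for-result loop with the macros defined just below, so a wedged
 * command trips a BUG_ON() instead of spinning silently.
 */
static inline void *example_poll_mc_result(struct qbman_swp *swp)
{
	void *result;
	int loopvar;

	DBG_POLL_START(loopvar);
	do {
		DBG_POLL_CHECK(loopvar);	/* BUG_ON() after ~10 empty polls */
		result = qbman_swp_mc_result(swp);
	} while (!result);
	return result;
}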
-+ */ -+#define DBG_POLL_START(loopvar) (loopvar = 10) -+#define DBG_POLL_CHECK(loopvar) \ -+ do {if (!(loopvar--)) BUG_ON(NULL == "DBG_POLL_CHECK"); } while (0) -+ -+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets -+ * and widths, these macro-generated encode/decode/isolate/remove inlines can -+ * be used. -+ * -+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), -+ * where the field is located 3 bits "up" from the least-significant bit of the -+ * register (ie. the field location within the 32-bit register corresponds to a -+ * mask of 0x0001fff8), you would do; -+ * uint16_t field = d32_uint16_t(3, 14, reg_value); -+ * -+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, -+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" -+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the -+ * LS bit), do; -+ * reg_value |= e32_int(19, 1, !!field); -+ * -+ * If you wish to read-modify-write a register, such that you leave the 14-bit -+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit -+ * value using; -+ * reg_value = i32_uint16_t(3, 14, reg_value); -+ * -+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to -+ * zero) but leaving all other fields as-is; -+ * reg_val = r32_int(19, 1, reg_value); -+ * -+ */ -+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \ -+ (uint32_t)((1 << width) - 1)) -+#define DECLARE_CODEC32(t) \ -+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ -+} \ -+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ -+} \ -+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ -+} \ -+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ~(MAKE_MASK32(width) << lsoffset) & val; \ -+} -+DECLARE_CODEC32(uint32_t) -+DECLARE_CODEC32(uint16_t) -+DECLARE_CODEC32(uint8_t) -+DECLARE_CODEC32(int) -+ -+ /*********************/ -+ /* Debugging assists */ -+ /*********************/ -+ -+static inline void __hexdump(unsigned long start, unsigned long end, -+ unsigned long p, size_t sz, const unsigned char *c) -+{ -+ while (start < end) { -+ unsigned int pos = 0; -+ char buf[64]; -+ int nl = 0; -+ pos += sprintf(buf + pos, "%08lx: ", start); -+ do { -+ if ((start < p) || (start >= (p + sz))) -+ pos += sprintf(buf + pos, ".."); -+ else -+ pos += sprintf(buf + pos, "%02x", *(c++)); -+ if (!(++start & 15)) { -+ buf[pos++] = '\n'; -+ nl = 1; -+ } else { -+ nl = 0; -+ if (!(start & 1)) -+ buf[pos++] = ' '; -+ if (!(start & 3)) -+ buf[pos++] = ' '; -+ } -+ } while (start & 15); -+ if (!nl) -+ buf[pos++] = '\n'; -+ buf[pos] = '\0'; -+ pr_info("%s", buf); -+ } -+} -+static inline void hexdump(const void *ptr, size_t sz) -+{ -+ unsigned long p = (unsigned long)ptr; -+ unsigned long start = p & ~(unsigned long)15; -+ unsigned long end = (p + sz + 15) & ~(unsigned long)15; -+ const unsigned char *c = ptr; -+ __hexdump(start, end, p, sz, c); -+} -+ -+#define QMAN_REV_4000 0x04000000 -+#define QMAN_REV_4100 0x04010000 -+#define 
QMAN_REV_4101 0x04010001 -+ -+#include "qbman_sys.h" -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys.h b/drivers/net/dpaa2/qbman/driver/qbman_sys.h -new file mode 100644 -index 0000000..d912ab0 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys.h -@@ -0,0 +1,367 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the -+ * driver. They are only included via qbman_private.h, which is itself a -+ * platform-independent file and is included by all the other driver source. -+ * -+ * qbman_sys_decl.h is included prior to all other declarations and logic, and -+ * it exists to provide compatibility with any linux interfaces our -+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file -+ * provides linux compatibility. -+ * -+ * This qbman_sys.h header, on the other hand, is included *after* any common -+ * and platform-neutral declarations and logic in qbman_private.h, and exists to -+ * implement any platform-specific logic of the qbman driver itself. Ie. it is -+ * *not* to provide linux compatibility. -+ */ -+ -+/* Trace the 3 different classes of read/write access to QBMan. #undef as -+ * required. */ -+#undef QBMAN_CCSR_TRACE -+#undef QBMAN_CINH_TRACE -+#undef QBMAN_CENA_TRACE -+ -+static inline void word_copy(void *d, const void *s, unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = s; -+ while (cnt--) -+ *(dd++) = *(ss++); -+} -+ -+/* Currently, the CENA support code expects each 32-bit word to be written in -+ * host order, and these are converted to hardware (little-endian) order on -+ * command submission. However, 64-bit quantities are must be written (and read) -+ * as two 32-bit words with the least-significant word first, irrespective of -+ * host endianness. 
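To make the word ordering described above concrete, here is an illustrative split of a single 64-bit value (e.g. 0x1122334455667788); the copy helpers that follow, u64_to_le32_copy() and u64_from_le32_copy(), do the same thing in a loop:

    #include <stdint.h>

    /* Illustration only: the least-significant 32-bit word goes first,
     * whatever the host's own byte order is. */
    static void split_u64(uint64_t v, uint32_t out[2])
    {
            out[0] = (uint32_t)(v & 0xffffffffULL); /* e.g. 0x55667788 written first  */
            out[1] = (uint32_t)(v >> 32);           /* e.g. 0x11223344 written second */
    }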
*/ -+static inline void u64_to_le32_copy(void *d, const uint64_t *s, -+ unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = (const uint32_t *)s; -+ while (cnt--) { -+ /* TBD: the toolchain was choking on the use of 64-bit types up -+ * until recently so this works entirely with 32-bit variables. -+ * When 64-bit types become usable again, investigate better -+ * ways of doing this. */ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ *(dd++) = ss[1]; -+ *(dd++) = ss[0]; -+ ss += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+static inline void u64_from_le32_copy(uint64_t *d, const void *s, -+ unsigned int cnt) -+{ -+ const uint32_t *ss = s; -+ uint32_t *dd = (uint32_t *)d; -+ while (cnt--) { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ dd[1] = *(ss++); -+ dd[0] = *(ss++); -+ dd += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+/* Convert a host-native 32bit value into little endian */ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+static inline uint32_t make_le32(uint32_t val) -+{ -+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | -+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); -+} -+static inline uint32_t make_le24(uint32_t val) -+{ -+ return (((val & 0xff) << 16) | (val & 0xff00) | -+ ((val & 0xff0000) >> 16)); -+} -+#else -+#define make_le32(val) (val) -+#define make_le24(val) (val) -+#endif -+static inline void make_le32_n(uint32_t *val, unsigned int num) -+{ -+ while (num--) { -+ *val = make_le32(*val); -+ val++; -+ } -+} -+ -+ /******************/ -+ /* Portal access */ -+ /******************/ -+struct qbman_swp_sys { -+ /* On GPP, the sys support for qbman_swp is here. The CENA region isi -+ * not an mmap() of the real portal registers, but an allocated -+ * place-holder, because the actual writes/reads to/from the portal are -+ * marshalled from these allocated areas using QBMan's "MC access -+ * registers". CINH accesses are atomic so there's no need for a -+ * place-holder. */ -+ uint8_t *cena; -+ uint8_t __iomem *addr_cena; -+ uint8_t __iomem *addr_cinh; -+ uint32_t idx; -+ enum qbman_eqcr_mode eqcr_mode; -+}; -+ -+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal -+ * C is (ACCESS_CMD,12,1) - is inhibited? 
(0==CENA, 1==CINH) -+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index -+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) -+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) -+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) -+ */ -+ -+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, -+ uint32_t val) -+{ -+ -+ __raw_writel(val, s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n", -+ s->addr_cinh, s->idx, offset, val); -+#endif -+} -+ -+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t reg = __raw_readl(s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n", -+ s->addr_cinh, s->idx, offset, reg); -+#endif -+ return reg; -+} -+ -+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ void *shadow = s->cena + offset; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ BUG_ON(offset & 63); -+ dcbz(shadow); -+ return shadow; -+} -+ -+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+#endif -+ BUG_ON(offset & 63); -+ return (s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, -+ uint32_t offset, void *cmd) -+{ -+ const uint32_t *shadow = cmd; -+ int loop; -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+ hexdump(cmd, 64); -+#endif -+ for (loop = 15; loop >= 1; loop--) -+ __raw_writel(shadow[loop], s->addr_cena + -+ offset + loop * 4); -+ lwsync(); -+ __raw_writel(shadow[0], s->addr_cena + offset); -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+ hexdump(cmd, 64); -+#endif -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ return __raw_readl(s->addr_cena + offset); -+} -+ -+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t *shadow = (uint32_t *)(s->cena + offset); -+ unsigned int loop; -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ -+ for (loop = 0; loop < 16; loop++) -+ shadow[loop] = __raw_readl(s->addr_cena + offset -+ + loop * 4); -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return shadow; -+} -+ -+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return s->addr_cena + offset; -+} -+ -+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dccivac(s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dccivac(s->addr_cena + offset); -+ 
prefetch_for_load(s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ prefetch_for_load(s->addr_cena + offset); -+} -+ -+ /******************/ -+ /* Portal support */ -+ /******************/ -+ -+/* The SWP_CFG portal register is special, in that it is used by the -+ * platform-specific code rather than the platform-independent code in -+ * qbman_portal.c. So use of it is declared locally here. */ -+#define QBMAN_CINH_SWP_CFG 0xd00 -+ -+/* For MC portal use, we always configure with -+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) -+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2) -+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) -+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) -+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2) -+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE) -+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) -+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE) -+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) -+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE) -+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE) -+ */ -+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, -+ uint8_t est, uint8_t rpm, uint8_t dcm, -+ uint8_t epm, int sd, int sp, int se, -+ int dp, int de, int ep) -+{ -+ uint32_t reg; -+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | -+ e32_uint8_t(16, 3, est) | -+ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) | -+ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) | -+ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) | -+ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn); -+ return reg; -+} -+ -+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, -+ const struct qbman_swp_desc *d, -+ uint8_t dqrr_size) -+{ -+ uint32_t reg; -+ s->addr_cena = d->cena_bar; -+ s->addr_cinh = d->cinh_bar; -+ s->idx = (uint32_t)d->idx; -+ s->cena = (void *)get_zeroed_page(GFP_KERNEL); -+ if (!s->cena) { -+ pr_err("Could not allocate page for cena shadow\n"); -+ return -1; -+ } -+ s->eqcr_mode = d->eqcr_mode; -+ BUG_ON(d->idx < 0); -+#ifdef QBMAN_CHECKING -+ /* We should never be asked to initialise for a portal that isn't in -+ * the power-on state. (Ie. don't forget to reset portals when they are -+ * decommissioned!) 
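The SWP_CFG field layout listed above pairs naturally with the e32_*()/d32_*() helpers generated in qbman_private.h. A hedged fragment, assuming those headers are in scope; cfg_readback and the chosen values are illustrative only:

    uint32_t cfg = e32_uint8_t(20, 3, 0x4);          /* DQRR_MF (bits 20..22) <- 0x4      */
    uint8_t  est = d32_uint8_t(16, 3, cfg_readback); /* pull EST (bits 16..18) back out   */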
-+ */ -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ BUG_ON(reg); -+#endif -+ if (s->eqcr_mode == qman_eqcr_vb_array) -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1, -+ 1, 1); -+ else -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1, -+ 1, 1); -+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ if (!reg) { -+ pr_err("The portal %d is not enabled!\n", s->idx); -+ kfree(s->cena); -+ return -1; -+ } -+ return 0; -+} -+ -+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) -+{ -+ free_page((unsigned long)s->cena); -+} -+ -+static inline void *qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ #ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+ #endif -+ BUG_ON(offset & 63); -+ return (s->addr_cena + offset); -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h -new file mode 100644 -index 0000000..ae7ef97 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h -@@ -0,0 +1,68 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+ -+/* Sanity check */ -+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \ -+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__) -+#error "Unknown endianness!" -+#endif -+ -+/* The platform-independent code shouldn't need endianness, except for -+ * weird/fast-path cases like qbman_result_has_token(), which needs to -+ * perform a passive and endianness-specific test on a read-only data structure -+ * very quickly. It's an exception, and this symbol is used for that case. 
*/ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+#define DQRR_TOK_OFFSET 0 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 -+#define SCN_STATE_OFFSET_IN_MEM 8 -+#define SCN_RID_OFFSET_IN_MEM 8 -+#else -+#define DQRR_TOK_OFFSET 24 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 -+#define SCN_STATE_OFFSET_IN_MEM 16 -+#define SCN_RID_OFFSET_IN_MEM 0 -+#endif -+ -+/* Similarly-named functions */ -+#define upper32(a) upper_32_bits(a) -+#define lower32(a) lower_32_bits(a) -+ -+ /****************/ -+ /* arch assists */ -+ /****************/ -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } -+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); } -+static inline void prefetch_for_load(void *p) -+{ -+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); -+} -+static inline void prefetch_for_store(void *p) -+{ -+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); -+} -diff --git a/drivers/net/dpaa2/qbman/include/compat.h b/drivers/net/dpaa2/qbman/include/compat.h -new file mode 100644 -index 0000000..0d14b58 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/compat.h -@@ -0,0 +1,597 @@ -+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef HEADER_COMPAT_H -+#define HEADER_COMPAT_H -+ -+#include -+ -+#ifndef _GNU_SOURCE -+#define _GNU_SOURCE -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* The following definitions are primarily to allow the single-source driver -+ * interfaces to be included by arbitrary program code. Ie. for interfaces that -+ * are also available in kernel-space, these definitions provide compatibility -+ * with certain attributes and types used in those interfaces. 
*/ -+ -+/* Required compiler attributes */ -+#define __maybe_unused __attribute__((unused)) -+#define __always_unused __attribute__((unused)) -+#define __packed __attribute__((__packed__)) -+#define __user -+#define likely(x) __builtin_expect(!!(x), 1) -+#define unlikely(x) __builtin_expect(!!(x), 0) -+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) -+#define container_of(p, t, f) (t *)((void *)p - offsetof(t, f)) -+#define __stringify_1(x) #x -+#define __stringify(x) __stringify_1(x) -+#define panic(x) \ -+do { \ -+ printf("panic: %s", x); \ -+ abort(); \ -+} while (0) -+ -+#ifdef ARRAY_SIZE -+#undef ARRAY_SIZE -+#endif -+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) -+ -+/* Required types */ -+typedef uint8_t u8; -+typedef uint16_t u16; -+typedef uint32_t u32; -+typedef uint64_t u64; -+typedef uint64_t dma_addr_t; -+typedef cpu_set_t cpumask_t; -+#define spinlock_t pthread_mutex_t -+typedef u32 compat_uptr_t; -+static inline void __user *compat_ptr(compat_uptr_t uptr) -+{ -+ return (void __user *)(unsigned long)uptr; -+} -+ -+static inline compat_uptr_t ptr_to_compat(void __user *uptr) -+{ -+ return (u32)(unsigned long)uptr; -+} -+ -+/* I/O operations */ -+static inline u32 in_be32(volatile void *__p) -+{ -+ volatile u32 *p = __p; -+ return *p; -+} -+static inline void out_be32(volatile void *__p, u32 val) -+{ -+ volatile u32 *p = __p; -+ *p = val; -+} -+ -+/* Debugging */ -+#define prflush(fmt, args...) \ -+ do { \ -+ printf(fmt, ##args); \ -+ fflush(stdout); \ -+ } while (0) -+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args) -+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args) -+#define pr_warning(fmt, args...) prflush("WARN:" fmt, ##args) -+#define pr_info(fmt, args...) prflush(fmt, ##args) -+ -+#define BUG() abort() -+#ifdef CONFIG_BUGON -+#ifdef pr_debug -+#undef pr_debug -+#endif -+#define pr_debug(fmt, args...) printf(fmt, ##args) -+#define BUG_ON(c) \ -+do { \ -+ if (c) { \ -+ pr_crit("BUG: %s:%d\n", __FILE__, __LINE__); \ -+ abort(); \ -+ } \ -+} while(0) -+#define might_sleep_if(c) BUG_ON(c) -+#define msleep(x) \ -+do { \ -+ pr_crit("BUG: illegal call %s:%d\n", __FILE__, __LINE__); \ -+ exit(EXIT_FAILURE); \ -+} while(0) -+#else -+#ifdef pr_debug -+#undef pr_debug -+#endif -+#define pr_debug(fmt, args...) 
do { ; } while(0) -+#define BUG_ON(c) do { ; } while(0) -+#define might_sleep_if(c) do { ; } while(0) -+#define msleep(x) do { ; } while(0) -+#endif -+#define WARN_ON(c, str) \ -+do { \ -+ static int warned_##__LINE__; \ -+ if ((c) && !warned_##__LINE__) { \ -+ pr_warning("%s\n", str); \ -+ pr_warning("(%s:%d)\n", __FILE__, __LINE__); \ -+ warned_##__LINE__ = 1; \ -+ } \ -+} while (0) -+ -+#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1)) -+ -+/****************/ -+/* Linked-lists */ -+/****************/ -+ -+struct list_head { -+ struct list_head *prev; -+ struct list_head *next; -+}; -+ -+#define LIST_HEAD(n) \ -+struct list_head n = { \ -+ .prev = &n, \ -+ .next = &n \ -+} -+#define INIT_LIST_HEAD(p) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ __p298->prev = __p298->next =__p298; \ -+} while(0) -+#define list_entry(node, type, member) \ -+ (type *)((void *)node - offsetof(type, member)) -+#define list_empty(p) \ -+({ \ -+ const struct list_head *__p298 = (p); \ -+ ((__p298->next == __p298) && (__p298->prev == __p298)); \ -+}) -+#define list_add(p,l) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ struct list_head *__l298 = (l); \ -+ __p298->next = __l298->next; \ -+ __p298->prev = __l298; \ -+ __l298->next->prev = __p298; \ -+ __l298->next = __p298; \ -+} while(0) -+#define list_add_tail(p,l) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ struct list_head *__l298 = (l); \ -+ __p298->prev = __l298->prev; \ -+ __p298->next = __l298; \ -+ __l298->prev->next = __p298; \ -+ __l298->prev = __p298; \ -+} while(0) -+#define list_for_each(i, l) \ -+ for (i = (l)->next; i != (l); i = i->next) -+#define list_for_each_safe(i, j, l) \ -+ for (i = (l)->next, j = i->next; i != (l); \ -+ i = j, j = i->next) -+#define list_for_each_entry(i, l, name) \ -+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \ -+ i = list_entry(i->name.next, typeof(*i), name)) -+#define list_for_each_entry_safe(i, j, l, name) \ -+ for (i = list_entry((l)->next, typeof(*i), name), \ -+ j = list_entry(i->name.next, typeof(*j), name); \ -+ &i->name != (l); \ -+ i = j, j = list_entry(j->name.next, typeof(*j), name)) -+#define list_del(i) \ -+do { \ -+ (i)->next->prev = (i)->prev; \ -+ (i)->prev->next = (i)->next; \ -+} while(0) -+ -+/* Other miscellaneous interfaces our APIs depend on; */ -+ -+#define lower_32_bits(x) ((u32)(x)) -+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16)) -+ -+/* Compiler/type stuff */ -+typedef unsigned int gfp_t; -+typedef uint32_t phandle; -+ -+#define noinline __attribute__((noinline)) -+#define __iomem -+#define EINTR 4 -+#define ENODEV 19 -+#define MODULE_AUTHOR(s) -+#define MODULE_LICENSE(s) -+#define MODULE_DESCRIPTION(s) -+#define MODULE_PARM_DESC(x, y) -+#define EXPORT_SYMBOL(x) -+#define module_init(fn) int m_##fn(void) { return fn(); } -+#define module_exit(fn) void m_##fn(void) { fn(); } -+#define module_param(x, y, z) -+#define module_param_string(w, x, y, z) -+#define GFP_KERNEL 0 -+#define __KERNEL__ -+#define __init -+#define __raw_readb(p) *(const volatile unsigned char *)(p) -+#define __raw_readl(p) *(const volatile unsigned int *)(p) -+#define __raw_writel(v, p) \ -+do { \ -+ *(volatile unsigned int *)(p) = (v); \ -+} while (0) -+ -+/* printk() stuff */ -+#define printk(fmt, args...) do_not_use_printk -+#define nada(fmt, args...) 
do { ; } while(0) -+ -+/* Interrupt stuff */ -+typedef uint32_t irqreturn_t; -+#define IRQ_HANDLED 0 -+ -+/* memcpy() stuff - when you know alignments in advance */ -+#ifdef CONFIG_TRY_BETTER_MEMCPY -+static inline void copy_words(void *dest, const void *src, size_t sz) -+{ -+ u32 *__dest = dest; -+ const u32 *__src = src; -+ size_t __sz = sz >> 2; -+ BUG_ON((unsigned long)dest & 0x3); -+ BUG_ON((unsigned long)src & 0x3); -+ BUG_ON(sz & 0x3); -+ while (__sz--) -+ *(__dest++) = *(__src++); -+} -+static inline void copy_shorts(void *dest, const void *src, size_t sz) -+{ -+ u16 *__dest = dest; -+ const u16 *__src = src; -+ size_t __sz = sz >> 1; -+ BUG_ON((unsigned long)dest & 0x1); -+ BUG_ON((unsigned long)src & 0x1); -+ BUG_ON(sz & 0x1); -+ while (__sz--) -+ *(__dest++) = *(__src++); -+} -+static inline void copy_bytes(void *dest, const void *src, size_t sz) -+{ -+ u8 *__dest = dest; -+ const u8 *__src = src; -+ while (sz--) -+ *(__dest++) = *(__src++); -+} -+#else -+#define copy_words memcpy -+#define copy_shorts memcpy -+#define copy_bytes memcpy -+#endif -+ -+/* Spinlock stuff */ -+#define spinlock_t pthread_mutex_t -+#define __SPIN_LOCK_UNLOCKED(x) PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+#define spin_lock_init(x) \ -+ do { \ -+ __maybe_unused int __foo; \ -+ pthread_mutexattr_t __foo_attr; \ -+ __foo = pthread_mutexattr_init(&__foo_attr); \ -+ BUG_ON(__foo); \ -+ __foo = pthread_mutexattr_settype(&__foo_attr, \ -+ PTHREAD_MUTEX_ADAPTIVE_NP); \ -+ BUG_ON(__foo); \ -+ __foo = pthread_mutex_init(x, &__foo_attr); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_lock(x) \ -+ do { \ -+ __maybe_unused int __foo = pthread_mutex_lock(x); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_unlock(x) \ -+ do { \ -+ __maybe_unused int __foo = pthread_mutex_unlock(x); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_lock_irq(x) do { \ -+ local_irq_disable(); \ -+ spin_lock(x); \ -+ } while (0) -+#define spin_unlock_irq(x) do { \ -+ spin_unlock(x); \ -+ local_irq_enable(); \ -+ } while (0) -+#define spin_lock_irqsave(x, f) do { spin_lock_irq(x); } while (0) -+#define spin_unlock_irqrestore(x, f) do { spin_unlock_irq(x); } while (0) -+ -+#define raw_spinlock_t spinlock_t -+#define raw_spin_lock_init(x) spin_lock_init(x) -+#define raw_spin_lock_irqsave(x, f) spin_lock(x) -+#define raw_spin_unlock_irqrestore(x, f) spin_unlock(x) -+ -+/* Completion stuff */ -+#define DECLARE_COMPLETION(n) int n = 0; -+#define complete(n) \ -+do { \ -+ *n = 1; \ -+} while(0) -+#define wait_for_completion(n) \ -+do { \ -+ while (!*n) { \ -+ bman_poll(); \ -+ qman_poll(); \ -+ } \ -+ *n = 0; \ -+} while(0) -+ -+/* Platform device stuff */ -+struct platform_device { void *dev; }; -+static inline struct -+platform_device *platform_device_alloc(const char *name __always_unused, -+ int id __always_unused) -+{ -+ struct platform_device *ret = malloc(sizeof(*ret)); -+ if (ret) -+ ret->dev = NULL; -+ return ret; -+} -+#define platform_device_add(pdev) 0 -+#define platform_device_del(pdev) do { ; } while(0) -+static inline void platform_device_put(struct platform_device *pdev) -+{ -+ free(pdev); -+} -+struct resource { -+ int unused; -+}; -+ -+/* Allocator stuff */ -+#define kmalloc(sz, t) malloc(sz) -+#define vmalloc(sz) malloc(sz) -+#define kfree(p) do { if (p) free(p); } while (0) -+static inline void *kzalloc(size_t sz, gfp_t __foo __always_unused) -+{ -+ void *ptr = malloc(sz); -+ if (ptr) -+ memset(ptr, 0, sz); -+ return ptr; -+} -+static inline 
unsigned long get_zeroed_page(gfp_t __foo __always_unused) -+{ -+ void *p; -+ if (posix_memalign(&p, 4096, 4096)) -+ return 0; -+ memset(p, 0, 4096); -+ return (unsigned long)p; -+} -+static inline void free_page(unsigned long p) -+{ -+ free((void *)p); -+} -+struct kmem_cache { -+ size_t sz; -+ size_t align; -+}; -+#define SLAB_HWCACHE_ALIGN 0 -+static inline struct kmem_cache *kmem_cache_create(const char *n __always_unused, -+ size_t sz, size_t align, unsigned long flags __always_unused, -+ void (*c)(void *) __always_unused) -+{ -+ struct kmem_cache *ret = malloc(sizeof(*ret)); -+ if (ret) { -+ ret->sz = sz; -+ ret->align = align; -+ } -+ return ret; -+} -+static inline void kmem_cache_destroy(struct kmem_cache *c) -+{ -+ free(c); -+} -+static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t f __always_unused) -+{ -+ void *p; -+ if (posix_memalign(&p, c->align, c->sz)) -+ return NULL; -+ return p; -+} -+static inline void kmem_cache_free(struct kmem_cache *c __always_unused, void *p) -+{ -+ free(p); -+} -+static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t f) -+{ -+ void *ret = kmem_cache_alloc(c, f); -+ if (ret) -+ memset(ret, 0, c->sz); -+ return ret; -+} -+ -+/* Bitfield stuff. */ -+#define BITS_PER_ULONG (sizeof(unsigned long) << 3) -+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6) -+#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1))) -+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG) -+static inline unsigned long test_bits(unsigned long mask, -+ volatile unsigned long *p) -+{ -+ return *p & mask; -+} -+static inline int test_bit(int idx, volatile unsigned long *bits) -+{ -+ return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+static inline void set_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p |= mask; -+} -+static inline void set_bit(int idx, volatile unsigned long *bits) -+{ -+ set_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+static inline void clear_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p &= ~mask; -+} -+static inline void clear_bit(int idx, volatile unsigned long *bits) -+{ -+ clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+static inline unsigned long test_and_set_bits(unsigned long mask, -+ volatile unsigned long *p) -+{ -+ unsigned long ret = test_bits(mask, p); -+ set_bits(mask, p); -+ return ret; -+} -+static inline int test_and_set_bit(int idx, volatile unsigned long *bits) -+{ -+ int ret = test_bit(idx, bits); -+ set_bit(idx, bits); -+ return ret; -+} -+static inline int test_and_clear_bit(int idx, volatile unsigned long *bits) -+{ -+ int ret = test_bit(idx, bits); -+ clear_bit(idx, bits); -+ return ret; -+} -+static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx) -+{ -+ while ((++idx < limit) && test_bit(idx, bits)) -+ ; -+ return idx; -+} -+static inline int find_first_zero_bit(unsigned long *bits, int limit) -+{ -+ int idx = 0; -+ while (test_bit(idx, bits) && (++idx < limit)) -+ ; -+ return idx; -+} -+ -+static inline u64 div64_u64(u64 n, u64 d) -+{ -+ return n / d; -+} -+ -+#define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); } -+#define smp_mb() dmb(ish) -+ -+/* Atomic stuff */ -+typedef struct { -+ int counter; -+} atomic_t; -+ -+#define atomic_read(v) (*(volatile int *)&(v)->counter) -+#define atomic_set(v, i) (((v)->counter) = (i)) -+static inline void atomic_add(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_add\n" -+ "1: ldxr %w0, %2\n" -+ " add %w0, %w0, %w3\n" -+ " 
stxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i)); -+} -+ -+static inline int atomic_add_return(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_add_return\n" -+ "1: ldxr %w0, %2\n" -+ " add %w0, %w0, %w3\n" -+ " stlxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i) -+ : "memory"); -+ -+ smp_mb(); -+ return result; -+} -+ -+static inline void atomic_sub(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_sub\n" -+ "1: ldxr %w0, %2\n" -+ " sub %w0, %w0, %w3\n" -+ " stxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i)); -+} -+ -+static inline int atomic_sub_return(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_sub_return\n" -+ "1: ldxr %w0, %2\n" -+ " sub %w0, %w0, %w3\n" -+ " stlxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i) -+ : "memory"); -+ -+ smp_mb(); -+ return result; -+} -+ -+#define atomic_inc(v) atomic_add(1, v) -+#define atomic_dec(v) atomic_sub(1, v) -+ -+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -+#define atomic_inc_return(v) (atomic_add_return(1, v)) -+#define atomic_dec_return(v) (atomic_sub_return(1, v)) -+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -+ -+#endif /* HEADER_COMPAT_H */ -diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h -new file mode 100644 -index 0000000..4cb784c ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h -@@ -0,0 +1,151 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_QBMAN_BASE_H -+#define _FSL_QBMAN_BASE_H -+ -+/** -+ * DOC: QBMan basic structures -+ * -+ * The QBMan block descriptor, software portal descriptor and Frame descriptor -+ * are defined here. -+ * -+ */ -+ -+/** -+ * struct qbman_block_desc - qbman block descriptor structure -+ * @ccsr_reg_bar: CCSR register map. -+ * @irq_rerr: Recoverable error interrupt line. -+ * @irq_nrerr: Non-recoverable error interrupt line -+ * -+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not -+ * control this QBMan instance, these values may simply be place-holders. The -+ * idea is simply that we be able to distinguish between them, eg. so that SWP -+ * descriptors can identify which QBMan instance they belong to. -+ */ -+struct qbman_block_desc { -+ void *ccsr_reg_bar; -+ int irq_rerr; -+ int irq_nrerr; -+}; -+ -+enum qbman_eqcr_mode { -+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */ -+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */ -+}; -+ -+/** -+ * struct qbman_swp_desc - qbman software portal descriptor structure -+ * @block: The QBMan instance. -+ * @cena_bar: Cache-enabled portal register map. -+ * @cinh_bar: Cache-inhibited portal register map. -+ * @irq: -1 if unused (or unassigned) -+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user. -+ * @qman_version: the qman version. -+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and -+ * valid bit array mode are supported. -+ * -+ * Descriptor for a QBMan software portal, expressed in terms that make sense to -+ * the user context. Ie. on MC, this information is likely to be true-physical, -+ * and instantiated statically at compile-time. On GPP, this information is -+ * likely to be obtained via "discovery" over a partition's "MC bus" -+ * (ie. in response to a MC portal command), and would take into account any -+ * virtualisation of the GPP user's address space and/or interrupt numbering. -+ */ -+struct qbman_swp_desc { -+ const struct qbman_block_desc *block; -+ uint8_t *cena_bar; -+ uint8_t *cinh_bar; -+ int irq; -+ int idx; -+ uint32_t qman_version; -+ enum qbman_eqcr_mode eqcr_mode; -+}; -+ -+/* Driver object for managing a QBMan portal */ -+struct qbman_swp; -+ -+/** -+ * struct qbman_fd - basci structure for qbman frame descriptor -+ * @words: for easier/faster copying the whole FD structure. -+ * @addr_lo: the lower 32 bits of the address in FD. -+ * @addr_hi: the upper 32 bits of the address in FD. -+ * @len: the length field in FD. -+ * @bpid_offset: represent the bpid and offset fields in FD. offset in -+ * the MS 16 bits, BPID in the LS 16 bits. -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * Place-holder for FDs, we represent it via the simplest form that we need for -+ * now. Different overlays may be needed to support different options, etc. (It -+ * is impractical to define One True Struct, because the resulting encoding -+ * routines (lots of read-modify-writes) would be worst-case performance whether -+ * or not circumstances required them.) -+ * -+ * Note, as with all data-structures exchanged between software and hardware (be -+ * they located in the portal register map or DMA'd to and from main-memory), -+ * the driver ensures that the caller of the driver API sees the data-structures -+ * in host-endianness. "struct qbman_fd" is no exception. 
The 32-bit words -+ * contained within this structure are represented in host-endianness, even if -+ * hardware always treats them as little-endian. As such, if any of these fields -+ * are interpreted in a binary (rather than numerical) fashion by hardware -+ * blocks (eg. accelerators), then the user should be careful. We illustrate -+ * with an example; -+ * -+ * Suppose the desired behaviour of an accelerator is controlled by the "frc" -+ * field of the FDs that are sent to it. Suppose also that the behaviour desired -+ * by the user corresponds to an "frc" value which is expressed as the literal -+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit -+ * value in which 0xfe is the first byte and 0xba is the last byte, and as -+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If -+ * the software is little-endian also, this can simply be achieved by setting -+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set -+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is -+ * to treat the 32-bit words as numerical values, in which the offset of a field -+ * from the beginning of the first byte (as required or generated by hardware) -+ * is numerically encoded by a left-shift (ie. by raising the field to a -+ * corresponding power of 2). Ie. in the current example, software could set -+ * "frc" in the following way, and it would work correctly on both little-endian -+ * and big-endian operation; -+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); -+ */ -+struct qbman_fd { -+ union { -+ uint32_t words[8]; -+ struct qbman_fd_simple { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t len; -+ uint32_t bpid_offset; -+ uint32_t frc; -+ uint32_t ctrl; -+ uint32_t flc_lo; -+ uint32_t flc_hi; -+ } simple; -+ }; -+}; -+ -+#endif /* !_FSL_QBMAN_BASE_H */ -diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h -new file mode 100644 -index 0000000..ddcabcf ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h -@@ -0,0 +1,1089 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_PORTAL_H -+#define _FSL_QBMAN_PORTAL_H -+ -+#include -+ -+/** -+ * DOC - QBMan portal APIs to implement the following functions: -+ * - Initialize and destroy Software portal object. -+ * - Read and write Software portal interrupt registers. -+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue -+ * command etc. -+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command, -+ * parsing the dequeue response in DQRR and memeory, parsing the state change -+ * notifications etc. -+ * - Release, including setting the release descriptor, and issuing the buffer -+ * release command. -+ * - Acquire, acquire the buffer from the given buffer pool. -+ * - FQ management. -+ * - Channel management, enable/disable CDAN with or without context. -+ */ -+ -+/** -+ * qbman_swp_init() - Create a functional object representing the given -+ * QBMan portal descriptor. -+ * @d: the given qbman swp descriptor -+ * -+ * Return qbman_swp portal object for success, NULL if the object cannot -+ * be created. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); -+ -+/** -+ * qbman_swp_finish() - Create and destroy a functional object representing -+ * the given QBMan portal descriptor. -+ * @p: the qbman_swp object to be destroyed. -+ * -+ */ -+void qbman_swp_finish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_get_desc() - Get the descriptor of the given portal object. -+ * @p: the given portal object. -+ * -+ * Return the descriptor for this portal. -+ */ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *); -+ -+ /**************/ -+ /* Interrupts */ -+ /**************/ -+ -+/* EQCR ring interrupt */ -+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) -+/* Enqueue command dispatched interrupt */ -+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) -+/* DQRR non-empty interrupt */ -+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) -+/* RCR ring interrupt */ -+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) -+/* Release command dispatched interrupt */ -+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) -+/* Volatile dequeue command interrupt */ -+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) -+ -+/** -+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_ISDR register. -+ */ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IDSR register. -+ */ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_read_status() - Get the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. 
-+ * -+ * Return the settings in SWP_ISR register. -+ */ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_clear_status() - Set the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_ISR register. -+ */ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_IER register. -+ */ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IER register. -+ */ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_IIR register. -+ */ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IIR register. -+ */ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); -+ -+ /************/ -+ /* Dequeues */ -+ /************/ -+ -+/** -+ * struct qbman_result - structure for qbman dequeue response and/or -+ * notification. -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * possible qbman dequeue result. -+ */ -+struct qbman_result { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/* TODO: -+ *A DQRI interrupt can be generated when there are dequeue results on the -+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to -+ * user-supplied 'storage' addresses). There are two parameters to this -+ * interrupt source, one is a threshold and the other is a timeout. The -+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or -+ * if the ring has been non-empty for been longer than 'timeout' nanoseconds. -+ * For timeout, an approximation to the desired nanosecond-granularity value is -+ * made, so there are get and set APIs to allow the user to see what actual -+ * timeout is set (compared to the timeout that was requested). */ -+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh); -+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout); -+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout); -+ -+ -+/* ------------------- */ -+/* Push-mode dequeuing */ -+/* ------------------- */ -+ -+/* The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifing the index (from 0 to 15) that has been mapped to the -+ * desired channel. -+ */ -+ -+/** -+ * qbman_swp_push_get() - Get the push dequeue setup. -+ * @s: the software portal object. -+ * @channel_idx: the channel index to query. -+ * @enabled: returned boolean to show whether the push dequeue is enabled for -+ * the given channel. 
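Taken together with qbman_swp_init() above, enabling push-mode dequeue is a two-step affair. A hedged bring-up sketch; swp_desc is assumed to have been populated by platform discovery and is not shown here:

    struct qbman_swp *swp = qbman_swp_init(&swp_desc);  /* swp_desc: assumed input */

    if (swp)
            qbman_swp_push_set(swp, 0, 1);  /* enable push dequeue on channel index 0 */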
-+ */ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled); -+ -+/** -+ * qbman_swp_push_set() - Enable or disable push dequeue. -+ * @s: the software portal object. -+ * @channel_idx: the channel index.. -+ * @enable: enable or disable push dequeue. -+ * -+ * The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. -+ */ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable); -+ -+/* ------------------- */ -+/* Pull-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * struct qbman_pull_desc - the structure for pull dequeue descriptor -+ * @dont_manipulate_directly: the 6 32bit data to represent the whole -+ * possible settings for pull dequeue descriptor. -+ */ -+struct qbman_pull_desc { -+ uint32_t dont_manipulate_directly[6]; -+}; -+ -+enum qbman_pull_type_e { -+ /* dequeue with priority precedence, respect intra-class scheduling */ -+ qbman_pull_type_prio = 1, -+ /* dequeue with active FQ precedence, respect ICS */ -+ qbman_pull_type_active, -+ /* dequeue with active FQ precedence, no ICS */ -+ qbman_pull_type_active_noics -+}; -+ -+/** -+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the pull dequeue descriptor to be cleared. -+ */ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d); -+ -+/** -+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage -+ * @d: the pull dequeue descriptor to be set. -+ * @storage: the pointer of the memory to store the dequeue result. -+ * @storage_phys: the physical address of the storage memory. -+ * @stash: to indicate whether write allocate is enabled. -+ * -+ * If not called, or if called with 'storage' as NULL, the result pull dequeues -+ * will produce results to DQRR. If 'storage' is non-NULL, then results are -+ * produced to the given memory location (using the physical/DMA address which -+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not -+ * those writes to main-memory express a cache-warming attribute. -+ */ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct qbman_result *storage, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. -+ * @d: the pull dequeue descriptor to be set. -+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. -+ */ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, -+ uint8_t numframes); -+/** -+ * qbman_pull_desc_set_token() - Set dequeue token for pull command -+ * @d: the dequeue descriptor -+ * @token: the token to be set -+ * -+ * token is the value that shows up in the dequeue response that can be used to -+ * detect when the results have been published. The easiest technique is to zero -+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value -+ */ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token); -+ -+/* Exactly one of the following descriptor "actions" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) 
-+ * - pull dequeue from the given frame queue (FQ) -+ * - pull dequeue from any FQ in the given work queue (WQ) -+ * - pull dequeue from any FQ in any WQ in the given channel -+ */ -+/** -+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. -+ * @fqid: the frame queue index of the given FQ. -+ */ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid); -+ -+/** -+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. -+ * @wqid: composed of channel id and wqid within the channel. -+ * @dct: the dequeue command type. -+ */ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct); -+ -+/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command -+ * dequeues. -+ * @chid: the channel id to be dequeued. -+ * @dct: the dequeue command type. -+ */ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct); -+ -+/** -+ * qbman_swp_pull() - Issue the pull dequeue command -+ * @s: the software portal object. -+ * @d: the software portal descriptor which has been configured with -+ * the set of qbman_pull_desc_set_*() calls. -+ * -+ * Return 0 for success, and -EBUSY if the software portal is not ready -+ * to do pull dequeue. -+ */ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d); -+ -+/* -------------------------------- */ -+/* Polling DQRR for dequeue results */ -+/* -------------------------------- */ -+ -+/** -+ * qbman_swp_dqrr_next() - Get an valid DQRR entry. -+ * @s: the software portal object. -+ * -+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. -+ */ -+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *); -+ -+/** -+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from -+ * qbman_swp_dqrr_next(). -+ * @s: the software portal object. -+ * @dq: the DQRR entry to be consumed. -+ */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq); -+ -+/** -+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr -+ * @dqrr: the given dqrr object. -+ * -+ * Return dqrr index. -+ */ -+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr); -+ -+/** -+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the -+ * given portal -+ * @s: the given portal. -+ * @idx: the dqrr index. -+ * -+ * Return dqrr entry object. -+ */ -+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx); -+ -+/* ------------------------------------------------- */ -+/* Polling user-provided storage for dequeue results */ -+/* ------------------------------------------------- */ -+ -+/** -+ * qbman_result_has_new_result() - Check and get the dequeue response from the -+ * dq storage memory set in pull dequeue command -+ * @s: the software portal object. -+ * @dq: the dequeue result read from the memory. -+ * -+ * Only used for user-provided storage of dequeue results, not DQRR. For -+ * efficiency purposes, the driver will perform any required endianness -+ * conversion to ensure that the user's dequeue result storage is in host-endian -+ * format (whether or not that is the same as the little-endian format that -+ * hardware DMA'd to the user's storage). 
As such, once the user has called -+ * qbman_result_has_new_result() and been returned a valid dequeue result, -+ * they should not call it again on the same memory location (except of course -+ * if another dequeue command has been executed to produce a new result to that -+ * location). -+ * -+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid -+ * dequeue result. -+ */ -+int qbman_result_has_new_result(struct qbman_swp *s, -+ const struct qbman_result *dq); -+ -+/* -------------------------------------------------------- */ -+/* Parsing dequeue entries (DQRR and user-provided storage) */ -+/* -------------------------------------------------------- */ -+ -+/** -+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not -+ * @dq: the dequeue result to be checked. -+ * -+ * DQRR entries may contain non-dequeue results, ie. notifications -+ */ -+int qbman_result_is_DQ(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_SCN() - Check the dequeue result is notification or not -+ * @dq: the dequeue result to be checked. -+ * -+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change -+ * notifications" of one type or another. Some APIs apply to all of them, of the -+ * form qbman_result_SCN_***(). -+ */ -+static inline int qbman_result_is_SCN(const struct qbman_result *dq) -+{ -+ return !qbman_result_is_DQ(dq); -+} -+ -+/* Recognise different notification types, only required if the user allows for -+ * these to occur, and cares about them when they do. -+ */ -+ -+/** -+ * qbman_result_is_FQDAN() - Check for FQ Data Availability -+ * @dq: the qbman_result object. -+ * -+ * Return 1 if this is FQDAN. -+ */ -+int qbman_result_is_FQDAN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CDAN() - Check for Channel Data Availability -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CDAN. -+ */ -+int qbman_result_is_CDAN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CSCN() - Check for Congestion State Change -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CSCN. -+ */ -+int qbman_result_is_CSCN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is BPSCN. -+ */ -+int qbman_result_is_BPSCN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CGCU. -+ */ -+int qbman_result_is_CGCU(const struct qbman_result *dq); -+ -+/* Frame queue state change notifications; (FQDAN in theory counts too as it -+ * leaves a FQ parked, but it is primarily a data availability notification) -+ */ -+ -+/** -+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQRN. -+ */ -+int qbman_result_is_FQRN(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQRNI. -+ */ -+int qbman_result_is_FQRNI(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_FQPN() - Check for FQ Park Notification -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQPN. 
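-+ *
-+ * A rough usage sketch of these type checks (the handle_*() callbacks and
-+ * the 'swp' portal variable are illustrative assumptions, not part of this
-+ * header): poll the DQRR, classify the entry, then consume it:
-+ *
-+ *	const struct qbman_result *r = qbman_swp_dqrr_next(swp);
-+ *
-+ *	if (r) {
-+ *		if (qbman_result_is_DQ(r))
-+ *			handle_frame(qbman_result_DQ_fd(r));
-+ *		else if (qbman_result_is_FQDAN(r) || qbman_result_is_CDAN(r))
-+ *			handle_data_availability(r);
-+ *		else if (qbman_result_is_CSCN(r) || qbman_result_is_BPSCN(r))
-+ *			handle_congestion_or_depletion(r);
-+ *		qbman_swp_dqrr_consume(swp, r);
-+ *	}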
-+ */
-+int qbman_result_is_FQPN(const struct qbman_result *dq);
-+
-+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
-+ */
-+/* FQ empty */
-+#define QBMAN_DQ_STAT_FQEMPTY 0x80
-+/* FQ held active */
-+#define QBMAN_DQ_STAT_HELDACTIVE 0x40
-+/* FQ force eligible */
-+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
-+/* Valid frame */
-+#define QBMAN_DQ_STAT_VALIDFRAME 0x10
-+/* FQ ODP enable */
-+#define QBMAN_DQ_STAT_ODPVALID 0x04
-+/* Volatile dequeue */
-+#define QBMAN_DQ_STAT_VOLATILE 0x02
-+/* volatile dequeue command is expired */
-+#define QBMAN_DQ_STAT_EXPIRED 0x01
-+
-+/**
-+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the state field.
-+ */
-+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
-+ * command.
-+ * @dq: the dequeue result.
-+ *
-+ * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
-+ */
-+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
-+{
-+	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
-+}
-+
-+/**
-+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
-+ * completed.
-+ * @dq: the dequeue result.
-+ *
-+ * Return 1 if the pull command is complete, 0 otherwise.
-+ */
-+static inline int qbman_result_DQ_is_pull_complete(
-+					const struct qbman_result *dq)
-+{
-+	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
-+}
-+
-+/**
-+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
-+ * seqnum is valid only if VALIDFRAME flag is TRUE
-+ * @dq: the dequeue result.
-+ *
-+ * Return seqnum.
-+ */
-+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
-+ * odpid is valid only if ODPVALID flag is TRUE.
-+ * @dq: the dequeue result.
-+ *
-+ * Return odpid.
-+ */
-+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return fqid.
-+ */
-+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the byte count remaining in the FQ.
-+ */
-+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_frame_count() - Get the frame count in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame count remaining in the FQ.
-+ */
-+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame queue context.
-+ */
-+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame descriptor.
-+ */
-+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
-+
-+/* State-change notifications (FQDAN/CDAN/CSCN/...). */
-+
-+/**
-+ * qbman_result_SCN_state() - Get the state field in State-change notification
-+ * @scn: the state change notification.
-+ *
-+ * Return the state in the notification.
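-+ *
-+ * A minimal sketch of how the generic SCN accessors are combined, assuming
-+ * 'r' is a result already identified as a notification via
-+ * qbman_result_is_SCN() (variable names are illustrative only):
-+ *
-+ *	uint8_t state = qbman_result_SCN_state(r);
-+ *	uint32_t rid = qbman_result_SCN_rid(r);
-+ *	uint64_t ctx = qbman_result_SCN_ctx(r);
-+ *
-+ * The meaning of 'rid' is type-specific; for a CDAN, for example, it carries
-+ * the channel id (see the qbman_result_CDAN_cid() macro below).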
-+ */ -+uint8_t qbman_result_SCN_state(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_rid() - Get the resource id from the notification -+ * @scn: the state change notification. -+ * -+ * Return the resource id. -+ */ -+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_ctx() - get the context from the notification -+ * @scn: the state change notification. -+ * -+ * Return the context. -+ */ -+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_state_in_mem() - Get the state in notification written -+ * in memory -+ * @scn: the state change notification. -+ * -+ * Return the state. -+ */ -+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written -+ * in memory. -+ * @scn: the state change notification. -+ * -+ * Return the resource id. -+ */ -+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn); -+ -+ -+/* Type-specific "resource IDs". Mainly for illustration purposes, though it -+ * also gives the appropriate type widths. -+ */ -+/* Get the FQID from the FQDAN */ -+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQRN */ -+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQRNI */ -+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQPN */ -+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the channel ID from the CDAN */ -+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+/* Get the CGID from the CSCN */ -+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+ -+/** -+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN -+ * @scn: the state change notification. -+ * -+ * Return the buffer pool id. -+ */ -+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free -+ * buffers in the pool from BPSCN. -+ * @scn: the state change notification. -+ * -+ * Return the number of free buffers. -+ */ -+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the -+ * buffer pool is depleted. -+ * @scn: the state change notification. -+ * -+ * Return the status of buffer pool depletion. -+ */ -+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer -+ * pool is surplus or not. -+ * @scn: the state change notification. -+ * -+ * Return the status of buffer pool surplus. -+ */ -+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message -+ * @scn: the state change notification. -+ * -+ * Return the BPSCN context. -+ */ -+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn); -+ -+/* Parsing CGCU */ -+/** -+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid -+ * @scn: the state change notification. -+ * -+ * Return the CGCU resource id. -+ */ -+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU -+ * @scn: the state change notification. -+ * -+ * Return instantaneous count in the CGCU notification. 
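-+ *
-+ * A short sketch of how the buffer-pool oriented notifications above might
-+ * be examined once qbman_result_is_BPSCN() has matched; replenish_pool() is
-+ * a hypothetical application callback, not part of this API:
-+ *
-+ *	uint16_t bpid = qbman_result_bpscn_bpid(scn);
-+ *
-+ *	if (qbman_result_bpscn_is_depleted(scn))
-+ *		replenish_pool(bpid);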
-+ */ -+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn); -+ -+ /************/ -+ /* Enqueues */ -+ /************/ -+ -+/** -+ * struct qbman_eq_desc - structure of enqueue descriptor -+ * @dont_manipulate_directly: the 8 32bit data to represent the whole -+ * possible qbman enqueue setting in enqueue descriptor. -+ */ -+struct qbman_eq_desc { -+ uint32_t dont_manipulate_directly[8]; -+}; -+ -+/** -+ * struct qbman_eq_response - structure of enqueue response -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * enqueue response. -+ */ -+struct qbman_eq_response { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/** -+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the given enqueue descriptor. -+ */ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d); -+ -+/* Exactly one of the following descriptor "actions" should be set. (Calling -+ * any one of these will replace the effect of any prior call to one of these.) -+ * - enqueue without order-restoration -+ * - enqueue with order-restoration -+ * - fill a hole in the order-restoration sequence, without any enqueue -+ * - advance NESN (Next Expected Sequence Number), without any enqueue -+ * 'respond_success' indicates whether an enqueue response should be DMA'd -+ * after success (otherwise a response is DMA'd only after failure). -+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to -+ * be enqueued. -+ */ -+ -+/** -+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ */ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success); -+/** -+ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ * @incomplete: indiates whether this is the last fragments using the same -+ * sequeue number. -+ */ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete); -+ -+/** -+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+ -+/** -+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number) -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+/** -+ * qbman_eq_desc_set_response() - Set the enqueue response info. -+ * @d: the enqueue descriptor -+ * @storage_phys: the physical address of the enqueue response in memory. -+ * @stash: indicate that the write allocation enabled or not. -+ * -+ * In the case where an enqueue response is DMA'd, this determines where that -+ * response should go. 
(The physical/DMA address is given for hardware's -+ * benefit, but software should interpret it as a "struct qbman_eq_response" -+ * data structure.) 'stash' controls whether or not the write to main-memory -+ * expresses a cache-warming attribute. -+ */ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash); -+ -+/** -+ * qbman_eq_desc_set_token() - Set token for the enqueue command -+ * @d: the enqueue descriptor -+ * @token: the token to be set. -+ * -+ * token is the value that shows up in an enqueue response that can be used to -+ * detect when the results have been published. The easiest technique is to zero -+ * result "storage" before issuing an enqueue, and use any non-zero 'token' -+ * value. -+ */ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token); -+ -+/** -+ * Exactly one of the following descriptor "targets" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - enqueue to a frame queue -+ * - enqueue to a queuing destination -+ * Note, that none of these will have any affect if the "action" type has been -+ * set to "orp_hole" or "orp_nesn". -+ */ -+/** -+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command -+ * @d: the enqueue descriptor -+ * @fqid: the id of the frame queue to be enqueued. -+ */ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid); -+ -+/** -+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command. -+ * @d: the enqueue descriptor -+ * @qdid: the id of the queuing destination to be enqueued. -+ * @qd_bin: the queuing destination bin -+ * @qd_prio: the queuing destination priority. -+ */ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio); -+ -+/** -+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt -+ * @d: the enqueue descriptor -+ * @enable: boolean to enable/disable EQDI -+ * -+ * Determines whether or not the portal's EQDI interrupt source should be -+ * asserted after the enqueue command is completed. -+ */ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable); -+ -+/** -+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command. -+ * @d: the enqueue descriptor. -+ * @enable: enabled/disable DCA mode. -+ * @dqrr_idx: DCAP_CI, the DCAP consumer index. -+ * @park: determine the whether park the FQ or not -+ * -+ * Determines whether or not a portal DQRR entry should be consumed once the -+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a -+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of -+ * being rescheduled.) -+ */ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park); -+ -+/** -+ * qbman_swp_enqueue() - Issue an enqueue command. -+ * @s: the software portal used for enqueue. -+ * @d: the enqueue descriptor. -+ * @fd: the frame descriptor to be enqueued. -+ * -+ * Please note that 'fd' should only be NULL if the "action" of the -+ * descriptor is "orp_hole" or "orp_nesn". -+ * -+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready. -+ */ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd); -+ -+/* TODO: -+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt. -+ * @s: the software portal. -+ * @thresh: the threshold to trigger the EQRI interrupt. 
-+ * -+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below -+ * the 'thresh' value set here. Setting thresh==0 (the default) disables. -+ */ -+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh); -+ -+ /*******************/ -+ /* Buffer releases */ -+ /*******************/ -+/** -+ * struct qbman_release_desc - The structure for buffer release descriptor -+ * @dont_manipulate_directly: the 32bit data to represent the whole -+ * possible settings of qbman release descriptor. -+ */ -+struct qbman_release_desc { -+ uint32_t dont_manipulate_directly[1]; -+}; -+ -+/** -+ * qbman_release_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the qbman release descriptor. -+ */ -+void qbman_release_desc_clear(struct qbman_release_desc *d); -+ -+/** -+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to -+ * @d: the qbman release descriptor. -+ */ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid); -+ -+/** -+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI -+ * interrupt source should be asserted after the release command is completed. -+ * @d: the qbman release descriptor. -+ */ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable); -+ -+/** -+ * qbman_swp_release() - Issue a buffer release command. -+ * @s: the software portal object. -+ * @d: the release descriptor. -+ * @buffers: a pointer pointing to the buffer address to be released. -+ * @num_buffers: number of buffers to be released, must be less than 8. -+ * -+ * Return 0 for success, -EBUSY if the release command ring is not ready. -+ */ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers); -+ -+/* TODO: -+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt -+ * @s: the software portal. -+ * @thresh: the threshold. -+ * An RCRI interrupt can be generated when the fill-level of RCR falls below -+ * the 'thresh' value set here. Setting thresh==0 (the default) disables. -+ */ -+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh); -+ -+ /*******************/ -+ /* Buffer acquires */ -+ /*******************/ -+/** -+ * qbman_swp_acquire() - Issue a buffer acquire command. -+ * @s: the software portal object. -+ * @bpid: the buffer pool index. -+ * @buffers: a pointer pointing to the acquired buffer address|es. -+ * @num_buffers: number of buffers to be acquired, must be less than 8. -+ * -+ * Return 0 for success, or negative error code if the acquire command -+ * fails. -+ */ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers); -+ -+ /*****************/ -+ /* FQ management */ -+ /*****************/ -+/** -+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be scheduled. -+ * -+ * There are a couple of different ways that a FQ can end up parked state, -+ * This schedules it. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid); -+ -+/** -+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be forced. 
-+ *
-+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
-+ * and thus be available for selection by any channel-dequeuing behaviour (push
-+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
-+ * empty at the time this happens, the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * These functions change the FQ flow-control state between XON/XOFF. (The
-+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
-+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
-+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
-+ * changed to XOFF after it had already become truly-scheduled to a channel, and
-+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
-+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
-+ * return NULL.)
-+ */
-+/**
-+ * qbman_swp_fq_xon() - XON the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-+/**
-+ * qbman_swp_fq_xoff() - XOFF the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-+
-+ /**********************/
-+ /* Channel management */
-+ /**********************/
-+
-+/**
-+ * If the user has been allocated a channel object that is going to generate
-+ * CDANs to another channel, then these functions will be necessary.
-+ * CDAN-enabled channels only generate a single CDAN notification, after which
-+ * they need to be re-enabled before they'll generate another. (The idea is
-+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
-+ * re-enable step.) Each function generates a distinct command to hardware, so a
-+ * combination function is provided if the user wishes to modify the "context"
-+ * (which shows up in each CDAN message) each time they re-enable, as a single
-+ * command to hardware.
-+ */
-+
-+/**
-+ * qbman_swp_CDAN_set_context() - Set CDAN context
-+ * @s: the software portal object.
-+ * @channelid: the channel index.
-+ * @ctx: the context to be set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-+			       uint64_t ctx);
-+
-+/**
-+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_disable() - Disable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ * @ctx: the context set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx); -+int qbman_swp_fill_ring(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ uint8_t burst_index); -+int qbman_swp_flush_ring(struct qbman_swp *s); -+void qbman_sync(void); -+int qbman_swp_send_multiple(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ int frames_to_send); -+ -+int qbman_check_command_complete(struct qbman_swp *s, -+ const struct qbman_result *dq); -+#endif /* !_FSL_QBMAN_PORTAL_H */ -diff --git a/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h -new file mode 100644 -index 0000000..b35c3ee ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h -@@ -0,0 +1,313 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef _RTE_ETH_DPAA2_PVT_H_ -+#define _RTE_ETH_DPAA2_PVT_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+typedef uint64_t dma_addr_t; -+ -+#define FALSE 0 -+#define TRUE 1 -+#ifndef false -+#define false FALSE -+#endif -+#ifndef true -+#define true TRUE -+#endif -+#define lower_32_bits(x) ((uint32_t)(x)) -+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16)) -+ -+#ifndef ETH_ADDR_LEN -+#define ETH_ADDR_LEN 6 -+#endif -+#ifndef ETH_VLAN_HLEN -+#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */ -+#endif -+ -+#define NUM_MAX_RECV_FRAMES 16 -+ -+#define MC_PORTAL_INDEX 0 -+#define NUM_DPIO_REGIONS 2 -+#define NUM_DQS_PER_QUEUE 2 -+#define MC_PORTALS_BASE_PADDR 0x00080C000000ULL -+#define MC_PORTAL_STRIDE 0x10000 -+#define MC_PORTAL_SIZE 64 -+#define MC_PORTAL_ID_TO_PADDR(portal_id) \ -+(MC_PORTALS_BASE_PADDR + (portal_id) * MC_PORTAL_STRIDE) -+ -+struct dpaa2_dpio_dev { -+ TAILQ_ENTRY(dpaa2_dpio_dev) next; /**< Pointer to Next device instance */ -+ uint16_t index; /**< Index of a instance in the list */ -+ rte_atomic16_t ref_count; /**< How many thread contexts are sharing this.*/ -+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */ -+ uint16_t token; -+ struct qbman_swp *sw_portal; /** SW portal object */ -+ const struct qbman_result *dqrr[4]; /**< DQRR Entry for this SW portal */ -+ pthread_mutex_t lock; /** Required when Portal is shared */ -+ void *mc_portal; /**< MC Portal for configuring this device */ -+ uintptr_t qbman_portal_ce_paddr; /**< Physical address of Cache Enabled Area */ -+ uintptr_t ce_size; /**< Size of the CE region */ -+ uintptr_t qbman_portal_ci_paddr; /**< Physical address of Cache Inhibit Area */ -+ uintptr_t ci_size; /**< Size of the CI region */ -+ void *intr_handle; -+ int32_t vfio_fd; /**< File descriptor received via VFIO */ -+ int32_t hw_id; /**< An unique ID of this DPIO device instance */ -+}; -+ -+struct queue_storage_info_t { -+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE]; -+ struct qbman_result *active_dqs; -+ int toggle; -+}; -+ -+struct thread_io_info_t { -+ struct dpaa2_dpio_dev *dpio_dev; -+ struct dpaa2_dpio_dev *sec_dpio_dev; -+ struct qbman_result *global_active_dqs; -+}; -+ -+/*! Global per thread DPIO portal */ -+extern __thread struct thread_io_info_t thread_io_info; -+/*! 
Global MCP list */ -+extern void *(*mcp_ptr_list); -+ -+/* Refer to Table 7-3 in SEC BG */ -+struct qbman_fle { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t length; -+ /* FMT must be 00, MSB is final bit */ -+ uint32_t fin_bpid_offset; -+ uint32_t frc; -+ uint32_t reserved[3]; /* Not used currently */ -+}; -+ -+/* Maximum release/acquire from QBMAN */ -+#define DPAA2_MBUF_MAX_ACQ_REL 7 -+ -+#define MAX_BPID 256 -+ -+/*Macros to define operations on FD*/ -+#define DPAA2_SET_FD_ADDR(fd, addr) \ -+ fd->simple.addr_lo = lower_32_bits((uint64_t)addr); \ -+ fd->simple.addr_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_SET_FD_LEN(fd, length) fd->simple.len = length -+#define DPAA2_SET_FD_BPID(fd, bpid) fd->simple.bpid_offset |= bpid; -+#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000)) -+#define DPAA2_SET_FD_OFFSET(fd, offset) (fd->simple.bpid_offset |= (uint32_t)(offset) << 16); -+#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc; -+#define DPAA2_RESET_FD_CTRL(fd) fd->simple.ctrl = 0; -+ -+#define DPAA2_SET_FD_ASAL(fd, asal) (fd->simple.ctrl |= (asal << 16)) -+#define DPAA2_SET_FD_FLC(fd, addr) \ -+ fd->simple.flc_lo = lower_32_bits((uint64_t)addr); \ -+ fd->simple.flc_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_GET_FLE_ADDR(fle) \ -+ (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo) -+#define DPAA2_SET_FLE_ADDR(fle, addr) \ -+ fle->addr_lo = lower_32_bits((uint64_t)addr); \ -+ fle->addr_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_SET_FLE_OFFSET(fle, offset) (fle)->fin_bpid_offset |= (uint32_t)(offset) << 16; -+#define DPAA2_SET_FLE_BPID(fle, bpid) (fle)->fin_bpid_offset |= (uint64_t)bpid; -+#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff) -+#define DPAA2_SET_FLE_FIN(fle) fle->fin_bpid_offset |= (uint64_t)1 << 31; -+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000)) -+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \ -+ fd->simple.bpid_offset |= (uint32_t)1 << 28; -+#define DPAA2_GET_FD_ADDR(fd) \ -+ (uint64_t)((((uint64_t)(fd->simple.addr_hi)) << 32) + fd->simple.addr_lo) -+#define DPAA2_GET_FD_LEN(fd) (fd->simple.len) -+#define DPAA2_GET_FD_BPID(fd) ((fd->simple.bpid_offset & 0x00003FFF)) -+#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14) -+#define DPAA2_GET_FD_OFFSET(fd) ((fd->simple.bpid_offset & 0x0FFF0000) >> 16) -+#define DPAA2_GET_FD_FRC(fd) (fd->simple.frc) -+#define DPAA2_GET_FD_FLC(fd) \ -+ (uint64_t)((((uint64_t)(fd->simple.flc_hi)) << 32) + fd->simple.flc_lo) -+ -+#define DPAA2_SET_FLE_SG_EXT(fle) fle->fin_bpid_offset |= (uint64_t)1<<29; -+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \ -+ (fle->fin_bpid_offset & ((uint64_t)1<<29))? 
1 : 0 -+ -+#define DPAA2_INLINE_MBUF_FROM_BUF(buf) \ -+ ((struct rte_mbuf *)((uint64_t)buf + DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES)) -+#define DPAA2_BUF_FROM_INLINE_MBUF(mbuf) \ -+ ((uint8_t *)((uint64_t)mbuf - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES))) -+ -+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64) -+ -+/*Macros to define QBMAN enqueue options */ -+#define DPAA2_ETH_EQ_DISABLE 0 /*!< Dont Enqueue the Frame */ -+#define DPAA2_ETH_EQ_RESP_ON_SUCC 1 /*!< Enqueue the Frame with -+ response after success*/ -+#define DPAA2_ETH_EQ_RESP_ON_FAIL 2 /*!< Enqueue the Frame with -+ response after failure*/ -+#define DPAA2_ETH_EQ_NO_RESP 3 /*!< Enqueue the Frame without -+ response*/ -+/* Only Enqueue Error responses will be -+ * pushed on FQID_ERR of Enqueue FQ */ -+#define DPAA2_EQ_RESP_ERR_FQ 0 -+/* All Enqueue responses will be pushed on address -+ * set with qbman_eq_desc_set_response */ -+#define DPAA2_EQ_RESP_ALWAYS 1 -+ -+#define DPAA2_MAX_BUF_POOLS 8 -+ -+struct dpbp_node { -+ struct dpbp_node *next; -+ struct fsl_mc_io dpbp; -+ uint16_t token; -+ int dpbp_id; -+}; -+ -+struct buf_pool_cfg { -+ void *addr; /*!< The address from where DPAA2 will carve out the -+ * buffers. 'addr' should be 'NULL' if user wants -+ * to create buffers from the memory which user -+ * asked DPAA2 to reserve during 'nadk init' */ -+ phys_addr_t phys_addr; /*!< corresponding physical address -+ * of the memory provided in addr */ -+ uint32_t num; /*!< number of buffers */ -+ uint32_t size; /*!< size of each buffer. 'size' should include -+ * any headroom to be reserved and alignment */ -+ uint16_t align; /*!< Buffer alignment (in bytes) */ -+ uint16_t bpid; /*!< The buffer pool id. This will be filled -+ *in by DPAA2 for each buffer pool */ -+}; -+ -+struct buf_pool { -+ uint32_t size; -+ uint32_t num_bufs; -+ uint16_t bpid; -+ uint8_t *h_bpool_mem; -+ struct rte_mempool *mp; -+ struct dpbp_node *dpbp_node; -+}; -+ -+/*! -+ * Buffer pool list configuration structure. User need to give DPAA2 the -+ * valid number of 'num_buf_pools'. -+ */ -+struct dpaa2_bp_list_cfg { -+ struct buf_pool_cfg buf_pool; /* Configuration -+ * of each buffer pool */ -+}; -+ -+struct dpaa2_bp_list { -+ struct dpaa2_bp_list *next; -+ struct rte_mempool *mp; -+ struct buf_pool buf_pool; -+}; -+ -+struct bp_info { -+ uint32_t size; -+ uint32_t meta_data_size; -+ struct dpaa2_bp_list *bp_list; -+}; -+ -+extern struct dpaa2_bp_list *h_bp_list; -+ -+//todo - this is costly, need to write a fast coversion routine -+static void *dpaa2_mem_ptov(phys_addr_t paddr) -+{ -+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); -+ int i; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { -+ if (paddr >= memseg[i].phys_addr && -+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len) -+ return (void *)(memseg[i].addr_64 + (paddr - memseg[i].phys_addr)); -+ } -+ return NULL; -+} -+ -+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) -+{ -+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); -+ int i; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { -+ if (vaddr >= memseg[i].addr_64 && -+ vaddr < memseg[i].addr_64 + memseg[i].len) -+ return memseg[i].phys_addr + (vaddr - memseg[i].addr_64); -+ } -+ return (phys_addr_t)(NULL); -+} -+ -+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -+/* -+ * When we are using Physical addresses as IO Virtual Addresses, -+ * we call conversion routines nadk_mem_vtop & nadk_mem_ptov wherever required. 
-+ * These routines are called with help of below MACRO's -+ */ -+ -+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_physaddr) -+ -+/** -+ * macro to convert Virtual address to IOVA -+ */ -+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr)) -+ -+/** -+ * macro to convert IOVA to Virtual address -+ */ -+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova)) -+ -+/** -+ * macro to convert modify the memory containing Virtual address to IOVA -+ */ -+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) \ -+ {_mem = (_type)(dpaa2_mem_vtop((uint64_t)(_mem))); } -+ -+/** -+ * macro to convert modify the memory containing IOVA to Virtual address -+ */ -+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \ -+ {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); } -+ -+#else -+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_addr) -+ -+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr) -+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova) -+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) -+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) -+#endif -+ -+#endif -diff --git a/drivers/net/dpaa2/rte_eth_dpbp.c b/drivers/net/dpaa2/rte_eth_dpbp.c -new file mode 100644 -index 0000000..6a7617d ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpbp.c -@@ -0,0 +1,430 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "rte_memzone.h" -+ -+#include "rte_eth_dpaa2_pvt.h" -+#include "fsl_qbman_portal.h" -+#include -+ -+#include -+#include "dpaa2_logs.h" -+ -+static struct dpbp_node *g_dpbp_list; -+static struct dpbp_node *avail_dpbp; -+ -+struct bp_info bpid_info[MAX_BPID]; -+ -+struct dpaa2_bp_list *h_bp_list; -+ -+int -+dpaa2_create_dpbp_device( -+ int dpbp_id) -+{ -+ struct dpbp_node *dpbp_node; -+ int ret; -+ -+ /* Allocate DPAA2 dpbp handle */ -+ dpbp_node = (struct dpbp_node *)malloc(sizeof(struct dpbp_node)); -+ if (!dpbp_node) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPBP Device\n"); -+ return -1; -+ } -+ -+ /* Open the dpbp object */ -+ dpbp_node->dpbp.regs = mcp_ptr_list[MC_PORTAL_INDEX]; -+ ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Resource allocation failure with err code: %d", -+ ret); -+ free(dpbp_node); -+ return -1; -+ } -+ -+ /* Clean the device first */ -+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpbp device with" -+ "error code %d\n", ret); -+ return -1; -+ } -+ -+ dpbp_node->dpbp_id = dpbp_id; -+ /* Add the dpbp handle into the global list */ -+ dpbp_node->next = g_dpbp_list; -+ g_dpbp_list = dpbp_node; -+ avail_dpbp = g_dpbp_list; -+ -+ PMD_DRV_LOG(INFO, "Buffer resource initialized\n"); -+ -+ return 0; -+} -+ -+int hw_mbuf_create_pool(struct rte_mempool *mp) -+{ -+ struct dpaa2_bp_list *bp_list; -+ struct dpbp_attr dpbp_attr; -+ int ret; -+ -+ if (!avail_dpbp) { -+ PMD_DRV_LOG(ERR, "DPAA2 resources not available\n"); -+ return -1; -+ } -+ -+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Resource enable failure with" -+ "err code: %d\n", ret); -+ return -1; -+ } -+ -+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW, -+ avail_dpbp->token, &dpbp_attr); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Resource read failure with" -+ "err code: %d\n", ret); -+ ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, -+ avail_dpbp->token); -+ return -1; -+ } -+ -+ /* Allocate the bp_list which will be added into global_bp_list */ -+ bp_list = (struct dpaa2_bp_list *)malloc(sizeof(struct dpaa2_bp_list)); -+ if (!bp_list) { -+ PMD_DRV_LOG(ERR, "No heap memory available\n"); -+ return -1; -+ } -+ -+ /* Set parameters of buffer pool list */ -+ bp_list->buf_pool.num_bufs = mp->size; -+ bp_list->buf_pool.size = mp->elt_size -+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp); -+ bp_list->buf_pool.bpid = dpbp_attr.bpid; -+ bp_list->buf_pool.h_bpool_mem = NULL; -+ bp_list->buf_pool.mp = mp; -+ bp_list->buf_pool.dpbp_node = avail_dpbp; -+ bp_list->next = h_bp_list; -+ -+ mp->offload_ptr = dpbp_attr.bpid; -+ -+ /* Increment the available DPBP */ -+ avail_dpbp = avail_dpbp->next; -+ -+ bpid_info[dpbp_attr.bpid].size = bp_list->buf_pool.size; -+ bpid_info[dpbp_attr.bpid].meta_data_size = sizeof(struct rte_mbuf) -+ + rte_pktmbuf_priv_size(mp); -+ bpid_info[dpbp_attr.bpid].bp_list = bp_list; -+ -+ PMD_DRV_LOG(INFO, "BP List created for bpid =%d\n", dpbp_attr.bpid); -+ -+ h_bp_list = bp_list; -+ return 0; -+} -+ -+static inline void dpaa2_mbuf_release(uint64_t buf, uint32_t bpid) -+{ -+ struct qbman_release_desc releasedesc; -+ struct qbman_swp *swp; -+ int ret; -+ -+ if 
(!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal"); -+ return; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ /* Create a release descriptor required for releasing -+ * buffers into BMAN */ -+ qbman_release_desc_clear(&releasedesc); -+ qbman_release_desc_set_bpid(&releasedesc, bpid); -+ -+ DPAA2_MODIFY_VADDR_TO_IOVA(buf, uint64_t); -+ do { -+ /* Release buffer into the BMAN */ -+ ret = qbman_swp_release(swp, &releasedesc, &buf, 1); -+ } while (ret == -EBUSY); -+ PMD_TX_FREE_LOG(DEBUG, "Released %p address to BMAN\n", buf); -+} -+ -+int hw_mbuf_alloc(struct rte_mempool *mp, void **mb) -+{ -+ struct qbman_swp *swp; -+ uint16_t bpid; -+ uint64_t buf; -+ int ret; -+ struct rte_mbuf *m; -+ -+ if ((mp->offload_ptr > MAX_BPID) || -+ !(bpid_info[mp->offload_ptr].bp_list)) { -+ -+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n"); -+ return -2; -+ } -+ -+ bpid = mp->offload_ptr; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal"); -+ return -1; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ do { -+ ret = qbman_swp_acquire(swp, bpid, &buf, 1); -+ } while (ret == -EBUSY); -+ if (ret <= 0) { -+ PMD_DRV_LOG(INFO, "Buffer alloc(bpid %d)fail: err: %x", -+ bpid, ret); -+ return -1; -+ } -+ DPAA2_MODIFY_IOVA_TO_VADDR(buf, uint64_t); -+ -+ PMD_DRV_LOG(INFO, "Acquired %p address from BMAN\n", buf); -+ m = (struct rte_mbuf *)DPAA2_INLINE_MBUF_FROM_BUF(buf); -+ RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0); -+ rte_mbuf_refcnt_set(m, 1); -+ *mb = m; -+ return 0; -+} -+ -+int hw_mbuf_free(void __rte_unused *m) -+{ -+ struct rte_mbuf *mb = (struct rte_mbuf *)m; -+ if ((mb->pool->offload_ptr > MAX_BPID) || -+ !(bpid_info[mb->pool->offload_ptr].bp_list)) { -+ -+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n"); -+ return -1; -+ } -+ -+ dpaa2_mbuf_release((uint64_t)DPAA2_BUF_FROM_INLINE_MBUF(m), -+ mb->pool->offload_ptr); -+ return 0; -+} -+ -+int hw_mbuf_alloc_bulk(struct rte_mempool *pool, -+ void **obj_table, unsigned count) -+{ -+ static int alloc; -+ struct qbman_swp *swp; -+ uint32_t mbuf_size; -+ uint16_t bpid; -+ uint64_t bufs[64]; -+ int ret; -+ unsigned i, n = 0; -+ struct rte_mbuf **mt = (struct rte_mbuf **)obj_table; -+ -+ //PMD_DRV_LOG(DEBUG, MBUF, "%s/n", __func__); -+ if ((pool->offload_ptr > MAX_BPID) || -+ !(bpid_info[pool->offload_ptr].bp_list)) { -+ -+ printf("\nDPAA2 buffer pool not configured\n"); -+ return -2; -+ } -+ -+ bpid = pool->offload_ptr; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal"); -+ return -1; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ /* if number of buffers requested is less than 7 */ -+ if (count < DPAA2_MBUF_MAX_ACQ_REL) { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], count); -+ if (ret <= 0){ -+ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret); -+ return -1; -+ } -+ n = ret; -+ goto set_buf; -+ } -+ -+ while (n < count) { -+ ret = 0; -+ /* Acquire is all-or-nothing, so we drain in 7s, -+ * then in 1s for the remainder. 
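-+		 * A request for DPAA2_MBUF_MAX_ACQ_REL (7) buffers either
-+		 * succeeds in full or returns nothing, so when fewer than 7
-+		 * buffers remain, or a 7-buffer acquire comes back short, we
-+		 * fall back to single-buffer acquires; a negative return at
-+		 * any point aborts the loop.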
*/ -+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], -+ DPAA2_MBUF_MAX_ACQ_REL); -+ if (ret == DPAA2_MBUF_MAX_ACQ_REL) { -+ n += ret; -+ } -+ } -+ if (ret < DPAA2_MBUF_MAX_ACQ_REL) { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], 1); -+ if (ret > 0) { -+ PMD_DRV_LOG(DEBUG, "Drained buffer: %x", -+ bufs[n]); -+ n += ret; -+ } -+ } -+ if (ret < 0) { -+ PMD_DRV_LOG(WARNING, "Buffer aquire failed with" -+ "err code: %d", ret); -+ break; -+ } -+ } -+ if (ret < 0 || n == 0){ -+ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret); -+ return -1; -+ } -+set_buf: -+ -+ mbuf_size = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(pool); -+ -+ for (i = 0; i < n; i++ ) { -+ -+ DPAA2_MODIFY_IOVA_TO_VADDR(buf[i], uint64_t); -+ -+ mt[i] = (struct rte_mbuf *)(bufs[i] - mbuf_size); -+ PMD_DRV_LOG(DEBUG,"Acquired %p address %p from BMAN\n", (void *)bufs[i], (void *)mt[i]); -+ if (!bufs[i] || !mt[i]) { -+ printf("\n ??????? how come we have a null buffer %p, %p", -+ (void *)bufs[i], (void *)mt[i]); -+ } -+ } -+ -+ alloc +=n; -+ PMD_DRV_LOG(DEBUG, "Total = %d , req = %d done = %d", -+ alloc, count, n); -+ return 0; -+} -+ -+int hw_mbuf_free_bulk(struct rte_mempool *pool, void * const *obj_table, -+ unsigned n) -+{ -+ unsigned i; -+ struct rte_mbuf *m; -+ //PMD_DRV_LOG(INFO, "%s/n", __func__); -+ if ((pool->offload_ptr > MAX_BPID) || -+ !(bpid_info[pool->offload_ptr].bp_list)) { -+ -+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n"); -+ return -1; -+ } -+ for (i = 0; i < n; i++) { -+ m = (struct rte_mbuf *)(obj_table[i]); -+ dpaa2_mbuf_release((uint64_t)m->buf_addr, pool->offload_ptr); -+ } -+ -+ return 0; -+} -+ -+int hw_mbuf_init( -+ struct rte_mempool *mp, -+ void *_m) -+{ -+ struct rte_mbuf *m = (struct rte_mbuf *)((unsigned char *)_m + DPAA2_FD_PTA_SIZE + -+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES); -+ uint32_t mbuf_size, buf_len, priv_size, head_size; -+ uint32_t bpid; -+ -+ if ((mp->offload_ptr > MAX_BPID) || -+ !(bpid_info[mp->offload_ptr].bp_list)) { -+ -+ PMD_DRV_LOG(WARNING, "DPAA2 buffer pool not configured\n"); -+ return -1; -+ } -+ /*todo - assuming that h_bp_list will be at top node*/ -+ bpid = mp->offload_ptr; -+ -+ priv_size = rte_pktmbuf_priv_size(mp); -+ mbuf_size = sizeof(struct rte_mbuf) + priv_size; -+ -+ RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size); -+ RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size); -+ -+ memset(_m, 0, mp->elt_size); -+ -+ /*update it in global list as well */ -+ bpid_info[bpid].meta_data_size = DPAA2_RES; -+ -+/* head_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION -+ + RTE_PKTMBUF_HEADROOM; -+ head_size = DPAA2_ALIGN_ROUNDUP(head_size, -+ DPAA2_PACKET_LAYOUT_ALIGN); -+ head_size -= DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION; -+*/ -+ head_size = RTE_PKTMBUF_HEADROOM; -+ -+ buf_len = rte_pktmbuf_data_room_size(mp) -+ - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES /* dummy */); -+ -+ RTE_MBUF_ASSERT(buf_len <= UINT16_MAX); -+ -+ /* start of buffer is after mbuf structure and priv data */ -+ m->priv_size = priv_size; -+ m->buf_addr = (char *)m + mbuf_size ; -+ m->buf_physaddr = rte_mempool_virt2phy(mp, _m) + DPAA2_FD_PTA_SIZE + -+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES + mbuf_size; -+ m->buf_len = (uint16_t)buf_len; -+ -+ /* keep some headroom between start of buffer and data */ -+ m->data_off = RTE_MIN(head_size, (uint16_t)m->buf_len); -+ /* init some constant fields */ -+ m->pool = mp; -+ m->nb_segs = 1; -+ m->port = 0xff; -+ -+ /* Release the mempool buffer to BMAN */ -+ 
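-+	/* Each mempool element is seeded into the QBMan buffer pool here at
-+	 * init time; hw_mbuf_alloc()/hw_mbuf_free() later acquire and release
-+	 * buffers through the same hardware pool (bpid) rather than through a
-+	 * software ring.
-+	 */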
dpaa2_mbuf_release((uint64_t)_m, bpid); -+ return 0; -+} -+ -diff --git a/drivers/net/dpaa2/rte_eth_dpio.c b/drivers/net/dpaa2/rte_eth_dpio.c -new file mode 100644 -index 0000000..23f0b08 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpio.c -@@ -0,0 +1,339 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "rte_memzone.h" -+#include -+ -+#include "rte_eth_dpaa2_pvt.h" -+#include "fsl_qbman_portal.h" -+#include -+ -+#include -+#include "dpaa2_logs.h" -+ -+#define NUM_HOST_CPUS RTE_MAX_LCORE -+ -+__thread struct thread_io_info_t thread_io_info; -+ -+TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev); -+static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */ -+static uint32_t io_space_count; -+ -+/*Stashing Macros*/ -+#define DPAA2_CORE_CLUSTER_BASE 0x04 -+#define DPAA2_CORE_CLUSTER_FIRST (DPAA2_CORE_CLUSTER_BASE + 0) -+#define DPAA2_CORE_CLUSTER_SECOND (DPAA2_CORE_CLUSTER_BASE + 1) -+#define DPAA2_CORE_CLUSTER_THIRD (DPAA2_CORE_CLUSTER_BASE + 2) -+#define DPAA2_CORE_CLUSTER_FOURTH (DPAA2_CORE_CLUSTER_BASE + 3) -+ -+#define DPAA2_CORE_CLUSTER_GET(sdest, cpu_id) \ -+do { \ -+ if (cpu_id == 0 || cpu_id == 1) \ -+ sdest = DPAA2_CORE_CLUSTER_FIRST; \ -+ else if (cpu_id == 2 || cpu_id == 3) \ -+ sdest = DPAA2_CORE_CLUSTER_SECOND; \ -+ else if (cpu_id == 4 || cpu_id == 5) \ -+ sdest = DPAA2_CORE_CLUSTER_THIRD; \ -+ else \ -+ sdest = DPAA2_CORE_CLUSTER_FOURTH; \ -+} while (0) -+ -+static int -+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) -+{ -+ struct qbman_swp_desc p_des; -+ struct dpio_attr attr; -+ -+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io)); -+ if (!dpio_dev->dpio) { -+ PMD_DRV_LOG(ERR, "Memory allocation failure\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Alocated DPIO[%p]\n", dpio_dev->dpio); -+ dpio_dev->dpio->regs = dpio_dev->mc_portal; -+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id, -+ &dpio_dev->token)) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO space\n"); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { -+ PMD_DRV_LOG(ERR, "Failed to Enable dpio\n"); -+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW, -+ dpio_dev->token, &attr)) { -+ PMD_DRV_LOG(ERR, "DPIO Get attribute failed\n"); -+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "Qbman Portal ID %d\n", attr.qbman_portal_id); -+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%lX\n", attr.qbman_portal_ce_offset); -+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%lX\n", attr.qbman_portal_ci_offset); -+ -+ /* Configure & setup SW portal */ -+ p_des.block = NULL; -+ p_des.idx = attr.qbman_portal_id; -+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr); -+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr); -+ p_des.irq = -1; -+ p_des.qman_version = attr.qbman_version; -+ -+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%p\n", p_des.cena_bar); -+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%p\n", p_des.cinh_bar); -+ -+ dpio_dev->sw_portal = qbman_swp_init(&p_des); -+ if (dpio_dev->sw_portal == NULL) { -+ PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n"); -+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal); -+ -+ return 0; -+} -+ -+int dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev) -+{ -+ int sdest; -+ int cpu_id, ret; -+ -+ /* Set the Stashing Destination */ -+ cpu_id = 
rte_lcore_id(); -+ if (cpu_id < 0) { -+ cpu_id = rte_get_master_lcore(); -+ if (cpu_id < 0) { -+ PMD_DRV_LOG(ERR, "\tGetting CPU Index failed\n"); -+ return -1; -+ } -+ } -+ -+ /* -+ * In case of running DPDK on the Virtual Machine the Stashing -+ * Destination gets set in the H/W w.r.t. the Virtual CPU ID's. -+ * As a W.A. environment variable HOST_START_CPU tells which -+ * the offset of the host start core of the Virtual Machine threads. -+ */ -+ if (getenv("HOST_START_CPU")) { -+ cpu_id += -+ atoi(getenv("HOST_START_CPU")); -+ cpu_id = cpu_id % NUM_HOST_CPUS; -+ } -+ -+ /* Set the STASH Destination depending on Current CPU ID. -+ Valid values of SDEST are 4,5,6,7. Where, -+ CPU 0-1 will have SDEST 4 -+ CPU 2-3 will have SDEST 5.....and so on. -+ */ -+ DPAA2_CORE_CLUSTER_GET(sdest, cpu_id); -+ PMD_DRV_LOG(INFO, "Portal= %d CPU= %u SDEST= %d\n", -+ dpio_dev->index, cpu_id, sdest); -+ -+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW, -+ dpio_dev->token, sdest); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+int -+dpaa2_affine_qbman_swp(void) -+{ -+ struct dpaa2_dpio_dev *dpio_dev = NULL; -+ int ret; -+ -+ if (thread_io_info.dpio_dev) -+ return 0; -+ -+ /* Get DPIO dev handle from list using index */ -+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) { -+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count)) -+ break; -+ } -+ if (!dpio_dev) -+ return -1; -+ -+ /* Populate the thread_io_info structure */ -+ thread_io_info.dpio_dev = dpio_dev; -+ -+ ret = dpaa2_configure_stashing(dpio_dev); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed"); -+ } -+ return ret; -+} -+ -+int -+dpaa2_affine_qbman_swp_sec(void) -+{ -+ struct dpaa2_dpio_dev *dpio_dev = NULL; -+ int ret; -+ -+ if (thread_io_info.sec_dpio_dev) -+ return 0; -+ -+ /* Get DPIO dev handle from list using index */ -+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) { -+ if (dpio_dev && rte_atomic16_read(&dpio_dev->ref_count) == 0) { -+ rte_atomic16_inc(&dpio_dev->ref_count); -+ break; -+ } -+ } -+ if (!dpio_dev) -+ return -1; -+ -+ /* Populate the thread_io_info structure */ -+ thread_io_info.sec_dpio_dev = dpio_dev; -+ -+ ret = dpaa2_configure_stashing(dpio_dev); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed"); -+ } -+ return ret; -+} -+ -+int -+dpaa2_create_dpio_device(struct vfio_device *vdev, -+ struct vfio_device_info *obj_info, -+ int object_id) -+{ -+ struct dpaa2_dpio_dev *dpio_dev; -+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; -+ -+ if (obj_info->num_regions < NUM_DPIO_REGIONS) { -+ PMD_DRV_LOG(ERR, "ERROR, Not sufficient number " -+ "of DPIO regions.\n"); -+ return -1; -+ } -+ -+ if (!dpio_dev_list) { -+ dpio_dev_list = malloc(sizeof(struct dpio_device_list)); -+ if (NULL == dpio_dev_list) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO list\n"); -+ return -1; -+ } -+ -+ /* Initialize the DPIO List */ -+ TAILQ_INIT(dpio_dev_list); -+ } -+ -+ dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev)); -+ if (!dpio_dev) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO Device\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]\n", dpio_dev); -+ dpio_dev->dpio = NULL; -+ dpio_dev->hw_id = object_id; -+ dpio_dev->vfio_fd = vdev->fd; -+ rte_atomic16_init(&dpio_dev->ref_count); -+ /* Using single portal for all devices */ -+ dpio_dev->mc_portal = mcp_ptr_list[MC_PORTAL_INDEX]; -+ -+ reg_info.index = 0; -+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, 
&reg_info)) { -+ printf("vfio: error getting region info\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset); -+ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size); -+ dpio_dev->ce_size = reg_info.size; -+ dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ dpio_dev->vfio_fd, reg_info.offset); -+ -+ /* Create Mapping for QBMan Cache Enabled area. This is a fix for -+ SMMU fault for DQRR stashing transaction. */ -+ if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr, -+ reg_info.offset, reg_info.size)) { -+ PMD_DRV_LOG(ERR, "DMAMAP for Portal CE area failed.\n"); -+ return -1; -+ } -+ -+ reg_info.index = 1; -+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) { -+ printf("vfio: error getting region info\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset); -+ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size); -+ dpio_dev->ci_size = reg_info.size; -+ dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ dpio_dev->vfio_fd, reg_info.offset); -+ -+ if (configure_dpio_qbman_swp(dpio_dev)) { -+ PMD_DRV_LOG(ERR, -+ "Failed in configuring the qbman portal for dpio %d\n", -+ dpio_dev->hw_id); -+ return -1; -+ } -+ -+ io_space_count++; -+ dpio_dev->index = io_space_count; -+ TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next); -+ -+ return 0; -+} -+ -diff --git a/drivers/net/dpaa2/rte_eth_dpni.c b/drivers/net/dpaa2/rte_eth_dpni.c -new file mode 100644 -index 0000000..62baf03 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpni.c -@@ -0,0 +1,2230 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+/* MC header files */ -+#include -+#include -+#include "rte_eth_dpaa2_pvt.h" -+#include "rte_eth_dpni_annot.h" -+#include "dpaa2_logs.h" -+ -+#include -+#include -+ -+#define DPAA2_STASHING -+ -+/* tx fd send batching */ -+#define QBMAN_MULTI_TX -+ -+#define RTE_ETH_DPAA2_SNAPSHOT_LEN 65535 -+#define RTE_ETH_DPAA2_SNAPLEN 4096 -+#define RTE_ETH_DPAA2_PROMISC 1 -+#define RTE_ETH_DPAA2_TIMEOUT -1 -+#define ETH_DPAA2_RX_IFACE_ARG "rx_iface" -+#define ETH_DPAA2_TX_IFACE_ARG "tx_iface" -+#define ETH_DPAA2_IFACE_ARG "iface" -+ -+static const char *drivername = "DPNI PMD"; -+ -+#define MAX_TCS DPNI_MAX_TC -+#define MAX_RX_QUEUES 64 -+#define MAX_TX_QUEUES 64 -+ -+/*Maximum number of slots available in TX ring*/ -+#define MAX_SLOTS 8 -+ -+/*Threshold for a queue to *Enter* Congestion state. -+ It is set to 128 frames of size 64 bytes.*/ -+#define CONG_ENTER_THRESHOLD 128*64 -+ -+/*Threshold for a queue to *Exit* Congestion state. -+ It is set to 98 frames of size 64 bytes*/ -+#define CONG_EXIT_THRESHOLD 98*64 -+ -+/*! Maximum number of flow distributions per traffic class */ -+#define MAX_DIST_PER_TC 16 -+ -+/* Size of the input SMMU mapped memory required by MC */ -+#define DIST_PARAM_IOVA_SIZE 256 -+ -+struct dpaa2_queue { -+ void *dev; -+ int32_t eventfd; /*!< Event Fd of this queue */ -+ uint32_t fqid; /*!< Unique ID of this queue */ -+ uint8_t tc_index; /*!< traffic class identifier */ -+ uint16_t flow_id; /*!< To be used by DPAA2 frmework */ -+ uint64_t rx_pkts; -+ uint64_t tx_pkts; -+ uint64_t err_pkts; -+ union { -+ struct queue_storage_info_t *q_storage; -+ struct qbman_result *cscn; -+ }; -+}; -+ -+struct dpaa2_dev_priv { -+ void *hw; -+ int32_t hw_id; -+ int32_t qdid; -+ uint16_t token; -+ uint8_t nb_tx_queues; -+ uint8_t nb_rx_queues; -+ void *rx_vq[MAX_RX_QUEUES]; -+ void *tx_vq[MAX_TX_QUEUES]; -+ -+ struct dpaa2_bp_list *bp_list; /**data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ uint64_t value; -+ -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME, &value); -+ printf("Rx packets: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_BYTE, &value); -+ printf("Rx bytes: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_MCAST_FRAME, &value); -+ printf("Rx Multicast: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DROP, &value); -+ printf("Rx dropped: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DISCARD, &value); -+ printf("Rx discarded: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME, &value); -+ printf("Tx packets: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_BYTE, &value); -+ printf("Tx bytes: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME_DISCARD, &value); -+ printf("Tx dropped: %ld\n", value); -+} -+ -+/** -+ * Atomically reads the link status information from global -+ * structure rte_eth_dev. -+ * -+ * @param dev -+ * - Pointer to the structure rte_eth_dev to read from. -+ * - Pointer to the buffer to be saved with the link status. -+ * -+ * @return -+ * - On success, zero. -+ * - On failure, negative value. 
-+ */ -+static inline int -+rte_dpni_dev_atomic_read_link_status(struct rte_eth_dev *dev, -+ struct rte_eth_link *link) -+{ -+ struct rte_eth_link *dst = link; -+ struct rte_eth_link *src = &dev->data->dev_link; -+ -+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, -+ *(uint64_t *)src) == 0) -+ return -1; -+ -+ return 0; -+} -+ -+/** -+ * Atomically writes the link status information into global -+ * structure rte_eth_dev. -+ * -+ * @param dev -+ * - Pointer to the structure rte_eth_dev to read from. -+ * - Pointer to the buffer to be saved with the link status. -+ * -+ * @return -+ * - On success, zero. -+ * - On failure, negative value. -+ */ -+static inline int -+rte_dpni_dev_atomic_write_link_status(struct rte_eth_dev *dev, -+ struct rte_eth_link *link) -+{ -+ struct rte_eth_link *dst = &dev->data->dev_link; -+ struct rte_eth_link *src = link; -+ -+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, -+ *(uint64_t *)src) == 0) -+ return -1; -+ -+ return 0; -+} -+ -+static inline void -+dpaa2_eth_parse_packet(struct rte_mbuf *mbuf) -+{ -+ uint32_t pkt_type = 0; -+ struct pkt_annotation *annotation = (struct pkt_annotation *) -+ ((uint8_t *)mbuf - (DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES)); -+ -+ PMD_DRV_LOG(DEBUG, "\n 1 annotation = 0x%x ", annotation->word4); -+ -+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L2_ETHER; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT)) -+ pkt_type /* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV6; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4_EXT; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_UDP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_TCP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_SCTP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_ICMP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_UNKNOWN; -+ -+ mbuf->packet_type = pkt_type; -+} -+ -+static inline -+struct rte_mbuf *eth_fd_to_mbuf(const struct qbman_fd *fd) -+{ -+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd)); -+ -+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p", -+ (void *)mbuf, mbuf->buf_addr); -+ -+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n", -+ DPAA2_GET_FD_ADDR(fd), -+ DPAA2_GET_FD_BPID(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_OFFSET(fd), -+ DPAA2_GET_FD_LEN(fd)); -+ -+// mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); -+ mbuf->data_len = DPAA2_GET_FD_LEN(fd); -+ mbuf->pkt_len = mbuf->data_len; -+ mbuf->next = NULL; -+ rte_mbuf_refcnt_set(mbuf, 1); -+ -+ /* Parse the packet */ -+ dpaa2_eth_parse_packet(mbuf); -+ -+ mbuf->nb_segs = 1; -+ mbuf->ol_flags = 0; -+ -+ return mbuf; -+} -+ -+static void __attribute__ ((noinline)) eth_mbuf_to_fd(struct rte_mbuf *mbuf, -+ struct qbman_fd *fd, uint16_t bpid) -+{ -+ /*Resetting the buffer pool id and offset field*/ -+ fd->simple.bpid_offset = 0; -+ -+ DPAA2_SET_FD_ADDR(fd, 
DPAA2_VADDR_TO_IOVA(DPAA2_BUF_FROM_INLINE_MBUF(mbuf))); -+ DPAA2_SET_FD_LEN(fd, mbuf->data_len); -+ DPAA2_SET_FD_BPID(fd, bpid); -+ DPAA2_SET_FD_OFFSET(fd, DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + -+ DPAA2_RES /* dummy */+ 128 + mbuf->priv_size + mbuf->data_off); -+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); -+ -+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p", -+ (void *)mbuf, mbuf->buf_addr); -+ -+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n", -+ DPAA2_GET_FD_ADDR(fd), -+ DPAA2_GET_FD_BPID(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_OFFSET(fd), -+ DPAA2_GET_FD_LEN(fd)); -+ -+ return; -+} -+ -+static int eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, -+ struct qbman_fd *fd, uint16_t bpid) -+{ -+ struct rte_mbuf *m; -+ void *mb = NULL; -+ -+ if (hw_mbuf_alloc(bpid_info[bpid].bp_list->buf_pool.mp, &mb)) { -+ PMD_DRV_LOG(WARNING, "Unable to allocated DPAA2 buffer"); -+ rte_pktmbuf_free(mbuf); -+ return -1; -+ } -+ m = (struct rte_mbuf *)mb; -+ memcpy((char *)m->buf_addr + mbuf->data_off, -+ (void *)((char *)mbuf->buf_addr + mbuf->data_off), -+ mbuf->pkt_len); -+ -+ /*Resetting the buffer pool id and offset field*/ -+ fd->simple.bpid_offset = 0; -+ -+ DPAA2_SET_FD_ADDR(fd, m->buf_addr); -+ DPAA2_SET_FD_LEN(fd, mbuf->data_len); -+ DPAA2_SET_FD_BPID(fd, bpid); -+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off); -+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); -+ -+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p", -+ (void *)mbuf, mbuf->buf_addr); -+ -+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n", -+ DPAA2_GET_FD_ADDR(fd), -+ DPAA2_GET_FD_BPID(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_OFFSET(fd), -+ DPAA2_GET_FD_LEN(fd)); -+ /*free the original packet */ -+ rte_pktmbuf_free(mbuf); -+ -+ return 0; -+} -+ -+static uint16_t -+eth_dpaa2_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function is responsible to receive frames for a given device and VQ*/ -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_result *dq_storage; -+ uint32_t fqid = dpaa2_q->fqid; -+ int ret, num_rx = 0; -+ uint8_t is_last = 0, status; -+ struct qbman_swp *swp; -+ const struct qbman_fd *fd; -+ struct qbman_pull_desc pulldesc; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ dq_storage = dpaa2_q->q_storage->dq_storage[0]; -+ -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ /* todo optimization - we can have dq_storage_phys available*/ -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ -+ /*Issue a volatile dequeue command. */ -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(ERR, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ /* Portal was busy, try again */ -+ continue; -+ } -+ break; -+ }; -+ -+ /* Receive the packets till Last Dequeue entry is found with -+ respect to the above issues PULL command. -+ */ -+ while (!is_last) { -+ /*Check if the previous issued command is completed. 
-+ *Also seems like the SWP is shared between the Ethernet Driver -+ *and the SEC driver.*/ -+ while(!qbman_check_command_complete(swp, dq_storage)) -+ ; -+ /* Loop until the dq_storage is updated with -+ * new token by QBMAN */ -+ while (!qbman_result_has_new_result(swp, dq_storage)) -+ ; -+ /* Check whether Last Pull command is Expired and -+ setting Condition for Loop termination */ -+ if (qbman_result_DQ_is_pull_complete(dq_storage)) { -+ is_last = 1; -+ /* Check for valid frame. */ -+ status = (uint8_t)qbman_result_DQ_flags(dq_storage); -+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { -+ PMD_DRV_LOG(DEBUG, "No frame is delivered\n"); -+ continue; -+ } -+ } -+ -+ fd = qbman_result_DQ_fd(dq_storage); -+ bufs[num_rx] = eth_fd_to_mbuf(fd); -+ bufs[num_rx]->port = dev->data->port_id; -+ -+ num_rx++; -+ dq_storage++; -+ } /* End of Packet Rx loop */ -+ -+ dpaa2_q->rx_pkts += num_rx; -+ -+ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx); -+ /*Return the total number of packets received to DPAA2 app*/ -+ return num_rx; -+} -+ -+static uint16_t -+eth_dpaa2_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function is responsible to receive frames for a given device and VQ*/ -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_result *dq_storage; -+ uint32_t fqid = dpaa2_q->fqid; -+ int ret, i, num_rx = 0; -+ uint8_t is_last = 0, status; -+ struct qbman_swp *swp; -+ const struct qbman_fd *fd[16]; -+ struct qbman_pull_desc pulldesc; -+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ -+ if(!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ if(!q_storage->active_dqs) { -+ q_storage->toggle = 0; -+ dq_storage = q_storage->dq_storage[q_storage->toggle]; -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ if(thread_io_info.global_active_dqs) { -+ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs)) -+ ; -+ } -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(WARNING, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ /* Portal was busy, try again */ -+ continue; -+ } -+ break; -+ } -+ q_storage->active_dqs = dq_storage; -+ thread_io_info.global_active_dqs = dq_storage; -+ } -+ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs)) -+ ; -+ dq_storage = q_storage->active_dqs; -+ while (!is_last) { -+ /* Loop until the dq_storage is updated with -+ * new token by QBMAN */ -+ struct rte_mbuf *mbuf; -+ -+ while (!qbman_result_has_new_result(swp, dq_storage)) -+ ; -+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1))); -+ /* Check whether Last Pull command is Expired and -+ setting Condition for Loop termination */ -+ if (qbman_result_DQ_is_pull_complete(dq_storage)) { -+ is_last = 1; -+ /* Check for valid frame. 
*/ -+ status = (uint8_t)qbman_result_DQ_flags(dq_storage); -+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { -+ PMD_DRV_LOG(DEBUG, "No frame is delivered\n"); -+ continue; -+ } -+ } -+ fd[num_rx] = qbman_result_DQ_fd(dq_storage); -+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd[num_rx])); -+ /* Prefeth mbuf */ -+ rte_prefetch0(mbuf); -+ /* Prefetch Annotation address from where we get parse results */ -+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_FD_PTA_SIZE + 16)); -+ /*Prefetch Data buffer*/ -+ /* rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_GET_FD_OFFSET(fd[num_rx]))); */ -+ dq_storage++; -+ num_rx++; -+ -+ } /* End of Packet Rx loop */ -+ -+ for (i = 0; i < num_rx; i++) { -+ bufs[i] = eth_fd_to_mbuf(fd[i]); -+ bufs[i]->port = dev->data->port_id; -+ } -+ -+ q_storage->toggle ^= 1; -+ dq_storage = q_storage->dq_storage[q_storage->toggle]; -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ /*Issue a volatile dequeue command. */ -+ -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(WARNING, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ continue; -+ } -+ break; -+ } -+ q_storage->active_dqs = dq_storage; -+ thread_io_info.global_active_dqs = dq_storage; -+ -+ dpaa2_q->rx_pkts += num_rx; -+ -+ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx); -+ /*Return the total number of packets received to DPAA2 app*/ -+ return num_rx; -+} -+ -+/* -+ * Callback to handle sending packets through a real NIC. -+ */ -+static uint16_t -+eth_dpaa2_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function to transmit the frames to given device and VQ*/ -+ uint32_t loop; -+ int32_t ret; -+#ifdef QBMAN_MULTI_TX -+ struct qbman_fd fd_arr[8]; -+ uint32_t frames_to_send; -+#else -+ struct qbman_fd fd; -+#endif -+ struct rte_mempool *mp; -+ struct qbman_eq_desc eqdesc; -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_swp *swp; -+ uint16_t num_tx = 0; -+ /*todo - need to support multiple buffer pools */ -+ uint16_t bpid; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ /*Prepare enqueue descriptor*/ -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); -+ qbman_eq_desc_set_response(&eqdesc, 0, 0); -+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid, -+ dpaa2_q->flow_id, dpaa2_q->tc_index); -+ -+ /*Clear the unused FD fields before sending*/ -+#ifdef QBMAN_MULTI_TX -+ while(nb_pkts) { -+ /*Check if the queue is congested*/ -+ if(qbman_result_is_CSCN(dpaa2_q->cscn)) -+ goto skip_tx; -+ -+ frames_to_send = (nb_pkts >> 3) ? 
MAX_SLOTS : nb_pkts; -+ -+ for(loop = 0; loop < frames_to_send; loop++) { -+ fd_arr[loop].simple.frc = 0; -+ DPAA2_RESET_FD_CTRL((&fd_arr[loop])); -+ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL); -+ mp = (*bufs)->pool; -+ /* Not a hw_pkt pool allocated frame */ -+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) { -+ printf ("\n non hw offload bufffer "); -+ /* alloc should be from the default buffer pool -+ attached to this interface */ -+ bpid = priv->bp_list->buf_pool.bpid; -+ if (eth_copy_mbuf_to_fd(*bufs, &fd_arr[loop], bpid)) { -+ bufs++; -+ continue; -+ } -+ } else { -+ bpid = mp->offload_ptr; -+ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid); -+ } -+ bufs++; -+ } -+ loop = 0; -+ while(loop < frames_to_send) { -+ loop += qbman_swp_send_multiple(swp, &eqdesc, -+ &fd_arr[loop], frames_to_send - loop); -+ } -+ -+ num_tx += frames_to_send; -+ dpaa2_q->tx_pkts += frames_to_send; -+ nb_pkts -= frames_to_send; -+ } -+#else -+ /*Check if the queue is congested*/ -+// if(qbman_result_is_CSCN(dpaa2_q->cscn)) -+// goto skip_tx; -+ -+ fd.simple.frc = 0; -+ DPAA2_RESET_FD_CTRL((&fd)); -+ DPAA2_SET_FD_FLC((&fd), NULL); -+ loop = 0; -+ -+ while (loop < nb_pkts) { -+ /*Prepare each packet which is to be sent*/ -+ mp = bufs[loop]->pool; -+ /* Not a hw_pkt pool allocated frame */ -+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) { -+ printf ("\n non hw offload bufffer "); -+ /* alloc should be from the default buffer pool -+ attached to this interface */ -+ if (priv->bp_list) -+ bpid = priv->bp_list->buf_pool.bpid; -+ else -+ printf("\n ??? why no bpool attached"); -+ -+ if (eth_copy_mbuf_to_fd(bufs[loop], &fd, bpid)) { -+ loop++; -+ continue; -+ } -+ } else { -+ bpid = mp->offload_ptr; -+ eth_mbuf_to_fd(bufs[loop], &fd, bpid); -+ } -+ /*Enqueue a single packet to the QBMAN*/ -+ do { -+ ret = qbman_swp_enqueue(swp, &eqdesc, &fd); -+ if (ret != 0) { -+ PMD_DRV_LOG(DEBUG, "Error in transmiting the frame\n"); -+ } -+ } while (ret != 0); -+ -+ /* Free the buffer shell */ -+ /* rte_pktmbuf_free(bufs[loop]); */ -+ num_tx++; loop++; -+ } -+ dpaa2_q->tx_pkts += num_tx; -+ dpaa2_q->err_pkts += nb_pkts - num_tx; -+#endif -+ skip_tx: -+ return num_tx; -+} -+ -+static int -+dpaa2_vlan_stripping_set(struct rte_eth_dev *dev, int on) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -1; -+ } -+ -+ ret = dpni_set_vlan_removal(dpni, CMD_PRI_LOW, priv->token, on); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to dpni_set_vlan_removal hwid =%d", -+ priv->hw_id); -+ return ret; -+} -+ -+static int -+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -1; -+ } -+ -+ if (on) -+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); -+ else -+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); -+ -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", -+ ret, vlan_id, priv->hw_id); -+ -+ /*todo this should on global basis */ -+/* ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, on); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to set vlan filter"); -+*/ return ret; -+} -+ -+static void -+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) -+{ -+ struct 
dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ if (mask & ETH_VLAN_FILTER_MASK) { -+ if (dev->data->dev_conf.rxmode.hw_vlan_filter) -+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, TRUE); -+ else -+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, FALSE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "ret = %d Unable to set vlan filter", ret); -+ } -+ -+ if (mask & ETH_VLAN_STRIP_MASK) { -+ /* Enable or disable VLAN stripping */ -+ if (dev->data->dev_conf.rxmode.hw_vlan_strip) -+ dpaa2_vlan_stripping_set(dev, TRUE); -+ else -+ dpaa2_vlan_stripping_set(dev, FALSE); -+ } -+ -+ if (mask & ETH_VLAN_EXTEND_MASK) { -+ PMD_INIT_FUNC_TRACE(); -+/* if (dev->data->dev_conf.rxmode.hw_vlan_extend) -+ i40e_vsi_config_double_vlan(vsi, TRUE); -+ else -+ i40e_vsi_config_double_vlan(vsi, FALSE); -+*/ } -+} -+ -+static void -+dpaa2_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ -+ dev_info->driver_name = drivername; -+ dev_info->if_index = priv->hw_id; -+ dev_info->max_mac_addrs = priv->max_unicast_filters; -+ dev_info->max_rx_pktlen = (uint32_t)-1; -+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; -+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; -+ dev_info->min_rx_bufsize = 0; -+ dev_info->pci_dev = dev->pci_dev; -+/* dev_info->rx_offload_capa = -+ DEV_RX_OFFLOAD_IPV4_CKSUM | -+ DEV_RX_OFFLOAD_UDP_CKSUM | -+ DEV_RX_OFFLOAD_TCP_CKSUM; -+ dev_info->tx_offload_capa = -+ DEV_TX_OFFLOAD_IPV4_CKSUM | -+ DEV_TX_OFFLOAD_UDP_CKSUM | -+ DEV_TX_OFFLOAD_TCP_CKSUM | -+ DEV_TX_OFFLOAD_SCTP_CKSUM; -+*/ -+} -+ -+static int -+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ uint8_t tc_idx; -+ uint16_t dist_idx; -+ uint32_t vq_id; -+ struct dpaa2_queue *mc_q, *mcq; -+ uint32_t tot_queues; -+ int i; -+ struct dpaa2_queue *dpaa2_q; -+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; -+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, -+ RTE_CACHE_LINE_SIZE); -+ if (!mc_q) { -+ PMD_DRV_LOG(ERR, "malloc failed for rx/tx queues\n"); -+ return -1; -+ } -+ -+ for (i = 0; i < priv->nb_rx_queues; i++) { -+ mc_q->dev = dev; -+ priv->rx_vq[i] = mc_q++; -+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; -+ dpaa2_q->q_storage = rte_malloc("dq_storage", -+ sizeof(struct queue_storage_info_t), -+ RTE_CACHE_LINE_SIZE); -+ if(!dpaa2_q->q_storage) -+ goto fail; -+ -+ memset(dpaa2_q->q_storage, 0, sizeof(struct queue_storage_info_t)); -+ } -+ -+ for (i = 0; i < priv->nb_tx_queues; i++) { -+ mc_q->dev = dev; -+ priv->tx_vq[i] = mc_q++; -+ } -+ -+ vq_id = 0; -+ for (tc_idx = 0; tc_idx < priv->num_tc; tc_idx++) { -+ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[tc_idx]; dist_idx++) { -+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; -+ mcq->tc_index = tc_idx; -+ mcq->flow_id = dist_idx; -+ vq_id++; -+ } -+ } -+ -+ return 0; -+fail: -+ i -= 1; -+ while(i >= 0) -+ { -+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; -+ rte_free(dpaa2_q->q_storage); -+ i--; -+ } -+ return -1; -+} -+ -+static void dpaa2_distset_to_dpkg_profile_cfg( -+ uint32_t req_dist_set, -+ struct dpkg_profile_cfg *kg_cfg) -+{ -+ uint32_t loop = 0, i = 0, dist_field = 0; -+ int l2_configured = 0, l3_configured = 0; -+ int l4_configured = 0, sctp_configured = 0; -+ -+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); -+ while (req_dist_set) { -+ if (req_dist_set % 2 != 0) { -+ dist_field = 1U << loop; 
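-+ /* dist_field holds the single RSS hash flag (1U << loop) selected in this pass; the switch below maps it onto DPKG header-extract entries for the key-generation profile. */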
-+ switch (dist_field) { -+ case ETH_RSS_L2_PAYLOAD: -+ -+ if (l2_configured) -+ break; -+ l2_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_ETH; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_ETH_TYPE; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ case ETH_RSS_IPV4: -+ case ETH_RSS_FRAG_IPV4: -+ case ETH_RSS_NONFRAG_IPV4_OTHER: -+ case ETH_RSS_IPV6: -+ case ETH_RSS_FRAG_IPV6: -+ case ETH_RSS_NONFRAG_IPV6_OTHER: -+ case ETH_RSS_IPV6_EX: -+ -+ if (l3_configured) -+ break; -+ l3_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_DST; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_PROTO; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ kg_cfg->num_extracts++; -+ i++; -+ break; -+ -+ case ETH_RSS_NONFRAG_IPV4_TCP: -+ case ETH_RSS_NONFRAG_IPV6_TCP: -+ case ETH_RSS_NONFRAG_IPV4_UDP: -+ case ETH_RSS_NONFRAG_IPV6_UDP: -+ -+ if (l4_configured) -+ break; -+ l4_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_TCP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_TCP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_TCP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_TCP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ case ETH_RSS_NONFRAG_IPV4_SCTP: -+ case ETH_RSS_NONFRAG_IPV6_SCTP: -+ -+ if (sctp_configured) -+ break; -+ sctp_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_SCTP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_SCTP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_SCTP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_SCTP_PORT_DST; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ default: -+ PMD_DRV_LOG(WARNING, "Bad flow distribution option %x\n", dist_field); -+ } -+ } -+ req_dist_set = req_dist_set >> 1; -+ loop++; -+ } -+ kg_cfg->num_extracts = i; -+} -+ -+static int dpaa2_setup_flow_distribution(struct rte_eth_dev *eth_dev, -+ uint32_t req_dist_set) -+{ -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_rx_tc_dist_cfg tc_cfg; -+ struct dpkg_profile_cfg kg_cfg; -+ void *p_params; -+ int ret, tc_index = 0; -+ -+ p_params = rte_malloc( -+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); -+ if (!p_params) { -+ PMD_DRV_LOG(ERR, "Memory unavaialble\n"); -+ return -ENOMEM; -+ 
} -+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE); -+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); -+ -+ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); -+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); -+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues; -+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ -+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n"); -+ rte_free(p_params); -+ return ret; -+ } -+ -+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, -+ &tc_cfg); -+ rte_free(p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with" -+ "err code: %d\n", ret); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+dpaa2_remove_flow_distribution(struct rte_eth_dev *eth_dev, uint8_t tc_index) -+{ -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_rx_tc_dist_cfg tc_cfg; -+ struct dpkg_profile_cfg kg_cfg; -+ void *p_params; -+ int ret; -+ -+ p_params = rte_malloc( -+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); -+ if (!p_params) { -+ PMD_DRV_LOG(ERR, "Memory unavaialble\n"); -+ return -ENOMEM; -+ } -+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE); -+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); -+ -+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); -+ tc_cfg.dist_size = 0; -+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE; -+ -+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n"); -+ rte_free(p_params); -+ return ret; -+ } -+ -+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, -+ &tc_cfg); -+ rte_free(p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with" -+ "err code: %d\n", ret); -+ return ret; -+ } -+ return ret; -+} -+ -+static int -+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage) -+{ -+ int i=0; -+ -+ for(i = 0;i < NUM_DQS_PER_QUEUE; i++) { -+ q_storage->dq_storage[i] = rte_malloc(NULL, -+ NUM_MAX_RECV_FRAMES * sizeof(struct qbman_result), -+ RTE_CACHE_LINE_SIZE); -+ if(!q_storage->dq_storage[i]) -+ goto fail; -+ /*setting toggle for initial condition*/ -+ q_storage->toggle = -1; -+ } -+ return 0; -+fail: -+ i -= 1; -+ while(i >= 0) -+ { -+ rte_free(q_storage->dq_storage[i]); -+ } -+ return -1; -+} -+ -+static int -+dpaa2_eth_dev_configure(struct rte_eth_dev *dev) -+{ -+ struct rte_eth_dev_data *data = dev->data; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct rte_eth_conf *eth_conf = &data->dev_conf; -+ struct dpaa2_queue *dpaa2_q; -+ int i, ret; -+ -+ for (i = 0; i < data->nb_rx_queues; i++) { -+ data->rx_queues[i] = priv->rx_vq[i]; -+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; -+ if(dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) -+ return -1; -+ } -+ -+ for (i = 0; i < data->nb_tx_queues; i++) { -+ data->tx_queues[i] = priv->tx_vq[i]; -+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; -+ dpaa2_q->cscn = rte_malloc(NULL, sizeof(struct qbman_result), 16); -+ if(!dpaa2_q->cscn) -+ goto fail_tx_queue; -+ } -+ -+ /* Check for correct configuration */ -+ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS && -+ data->nb_rx_queues > 1) { -+ PMD_DRV_LOG(ERR, "Distribution is not enabled, " -+ "but Rx queues more than 1\n"); -+ return -1; -+ } -+ -+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { -+ /* Return in case number of Rx queues is 1 */ -+ if (data->nb_rx_queues == 1) -+ return 0; -+ ret = 
dpaa2_setup_flow_distribution(dev, -+ eth_conf->rx_adv_conf.rss_conf.rss_hf); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "dpaa2_setup_flow_distribution failed\n"); -+ return ret; -+ } -+ } -+ -+ return 0; -+ fail_tx_queue: -+ i -= 1; -+ while(i >= 0) { -+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; -+ rte_free(dpaa2_q->cscn); -+ } -+ return -1; -+} -+ -+static int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, -+ void *blist) -+{ -+ /* Function to attach a DPNI with a buffer pool list. Buffer pool list -+ * handle is passed in blist. -+ */ -+ int32_t retcode; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_pools_cfg bpool_cfg; -+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist; -+ -+ /*Attach buffer pool to the network interface as described by the user*/ -+ bpool_cfg.num_dpbp = 1; -+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id; -+ bpool_cfg.pools[0].backup_pool = 0; -+ bpool_cfg.pools[0].buffer_size = -+ DPAA2_ALIGN_ROUNDUP(bp_list->buf_pool.size, -+ DPAA2_PACKET_LAYOUT_ALIGN);; -+ -+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); -+ if (retcode != 0) { -+ PMD_DRV_LOG(ERR, "Error in attaching the buffer pool list" -+ "bpid = %d Error code = %d\n", -+ bpool_cfg.pools[0].dpbp_id, retcode); -+ return retcode; -+ } -+ -+ priv->bp_list = bp_list; -+ return 0; -+} -+ -+/* Function to setup RX flow information. It contains traffic class ID, -+ * flow ID, destination configuration etc. -+ */ -+static int -+dpaa2_rx_queue_setup(struct rte_eth_dev *dev, -+ uint16_t rx_queue_id, -+ uint16_t nb_rx_desc __rte_unused, -+ unsigned int socket_id __rte_unused, -+ const struct rte_eth_rxconf *rx_conf __rte_unused, -+ struct rte_mempool *mb_pool) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpaa2_queue *dpaa2_q; -+ struct dpni_queue_cfg cfg; -+ uint8_t tc_id, flow_id; -+ int ret; -+ -+ PMD_DRV_LOG(INFO, "\n dev =%p, queue =%d, pool = %p, conf =%p", -+ dev, rx_queue_id, mb_pool, rx_conf); -+ -+ if (!priv->bp_list) { -+ if (mb_pool->offload_ptr > MAX_BPID) { -+ printf ("\n ??? 
ERR - %s not a offloaded buffer pool", -+ __func__); -+ return -1; -+ } -+ ret = dpaa2_attach_bp_list(priv, -+ bpid_info[mb_pool->offload_ptr].bp_list); -+ if (ret) -+ return ret; -+ } -+ dpaa2_q = (struct dpaa2_queue *)dev->data->rx_queues[rx_queue_id]; -+ -+ /*Get the tc id and flow id from given VQ id*/ -+ tc_id = rx_queue_id / MAX_DIST_PER_TC; -+ flow_id = rx_queue_id % MAX_DIST_PER_TC; -+ memset(&cfg, 0, sizeof(struct dpni_queue_cfg)); -+ -+ cfg.options = cfg.options | DPNI_QUEUE_OPT_USER_CTX; -+ -+#ifdef DPAA2_STASHING -+ cfg.options = cfg.options | DPNI_QUEUE_OPT_FLC; -+#endif -+ -+ cfg.user_ctx = (uint64_t)(dpaa2_q); -+#ifdef DPAA2_STASHING -+ cfg.flc_cfg.flc_type = DPNI_FLC_STASH; -+ cfg.flc_cfg.frame_data_size = DPNI_STASH_SIZE_64B; -+ /* Enabling Annotation stashing */ -+ cfg.options |= DPNI_FLC_STASH_FRAME_ANNOTATION; -+ cfg.flc_cfg.options = DPNI_FLC_STASH_FRAME_ANNOTATION; -+#endif -+ -+ cfg.options = cfg.options | DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; -+ cfg.tail_drop_threshold = 2048;// 16 packet -+ -+ ret = dpni_set_rx_flow(dpni, CMD_PRI_LOW, priv->token, -+ tc_id, flow_id, &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); -+ return -1; -+ } -+ return 0; -+} -+ -+static int -+dpaa2_tx_queue_setup(struct rte_eth_dev *dev, -+ uint16_t tx_queue_id, -+ uint16_t nb_tx_desc __rte_unused, -+ unsigned int socket_id __rte_unused, -+ const struct rte_eth_txconf *tx_conf __rte_unused) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct dpaa2_queue *dpaa2_q; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_tx_flow_cfg cfg; -+ struct dpni_tx_conf_cfg tx_conf_cfg; -+#ifdef QBMAN_MULTI_TX -+ struct dpni_congestion_notification_cfg cong_notif_cfg; -+#endif -+ uint32_t tc_idx; -+ uint16_t flow_id = DPNI_NEW_FLOW_ID; -+ int ret; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ memset(&cfg, 0, sizeof(struct dpni_tx_flow_cfg)); -+ cfg.l3_chksum_gen = 1; -+ cfg.options |= DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN; -+ cfg.l4_chksum_gen = 1; -+ cfg.options = DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; -+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_tx_conf_cfg)); -+ tx_conf_cfg.errors_only = TRUE; -+ -+ /* -+ if (action & DPAA2BUF_TX_CONF_REQUIRED) { -+ cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; -+ cfg.use_common_tx_conf_queue = -+ ((action & DPAA2BUF_TX_CONF_ERR_ON_COMMON_Q) ? 
-+ TRUE : FALSE); -+ tx_conf_cfg.errors_only = FALSE; -+ }*/ -+ -+ if (priv->num_tc == 1) -+ tc_idx = 0; -+ else -+ tc_idx = tx_queue_id; -+ -+ ret = dpni_set_tx_flow(dpni, CMD_PRI_LOW, priv->token, &flow_id, &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting the tx flow:" -+ "ErrorCode = %x\n", ret); -+ return -1; -+ } -+ /*Set tx-conf and error configuration*/ -+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token, -+ flow_id, &tx_conf_cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: " -+ "ErrorCode = %x", ret); -+ return -1; -+ } -+ -+ if (tx_queue_id == 0) { -+ /*Set tx-conf and error configuration*/ -+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_COMMON_TX_CONF, &tx_conf_cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: " -+ "ErrorCode = %x", ret); -+ return -1; -+ } -+ } -+ /*todo - add the queue id support instead of hard queue id as "0" */ -+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[tx_queue_id]; -+ dpaa2_q->tc_index = tc_idx; -+ if (flow_id == DPNI_NEW_FLOW_ID) -+ dpaa2_q->flow_id = 0; -+ else -+ dpaa2_q->flow_id = flow_id; -+ -+#ifdef QBMAN_MULTI_TX -+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES; -+ /*Notify about congestion when the queue size is 128 frames with each \ -+ frame 64 bytes size*/ -+ cong_notif_cfg.threshold_entry = CONG_ENTER_THRESHOLD; -+ /*Notify that the queue is not congested when the number of frames in \ -+ the queue is below this thershold. -+ TODO: Check if this value is the optimum value for better performance*/ -+ cong_notif_cfg.threshold_exit = CONG_EXIT_THRESHOLD; -+ cong_notif_cfg.message_ctx = 0; -+ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; -+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; -+ cong_notif_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | -+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE; -+ -+ ret = dpni_set_tx_tc_congestion_notification(dpni, CMD_PRI_LOW, -+ priv->token, -+ tc_idx, &cong_notif_cfg); -+ if(ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx congestion notification " -+ "settings: ErrorCode = %x", ret); -+ return -1; -+ } -+#endif -+ return 0; -+} -+ -+static const uint32_t * -+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) -+{ -+ static const uint32_t ptypes[] = { -+ /*todo -= add more types */ -+ RTE_PTYPE_L2_ETHER, -+ RTE_PTYPE_L3_IPV4, -+ RTE_PTYPE_L3_IPV4_EXT, -+ RTE_PTYPE_L3_IPV6, -+ RTE_PTYPE_L3_IPV6_EXT, -+ RTE_PTYPE_L4_TCP, -+ RTE_PTYPE_L4_UDP, -+ RTE_PTYPE_L4_SCTP, -+ RTE_PTYPE_L4_ICMP, -+ RTE_PTYPE_UNKNOWN -+ }; -+ -+ if (dev->rx_pkt_burst == eth_dpaa2_prefetch_rx || -+ dev->rx_pkt_burst == eth_dpaa2_rx) -+ return ptypes; -+ return NULL; -+} -+ -+static int -+dpaa2_dev_start(struct rte_eth_dev *dev) -+{ -+ struct rte_eth_dev_data *data = dev->data; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpni_queue_attr cfg; -+ uint16_t qdid; -+ struct dpaa2_queue *dpaa2_q; -+ int ret, i, mask = 0; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ dev->data->dev_link.link_status = 1; -+ -+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure %d in enabling dpni %d device\n", -+ ret, priv->hw_id); -+ return ret; -+ } -+ -+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, &qdid); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); -+ return ret; -+ } -+ priv->qdid = qdid; -+ -+ for (i = 0; i < data->nb_rx_queues; i++) { -+ dpaa2_q = (struct dpaa2_queue 
*)data->rx_queues[i]; -+ ret = dpni_get_rx_flow(dpni, CMD_PRI_LOW, priv->token, -+ dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error to get flow " -+ "information Error code = %d\n", ret); -+ return ret; -+ } -+ dpaa2_q->fqid = cfg.fqid; -+ } -+ /* -+ * VLAN Offload Settings -+ */ -+ if (priv->options & DPNI_OPT_VLAN_FILTER) -+ mask = ETH_VLAN_FILTER_MASK; -+ -+ if (priv->options & DPNI_OPT_VLAN_MANIPULATION) -+ mask = ETH_VLAN_STRIP_MASK; -+ -+ if (mask) -+ dpaa2_vlan_offload_set(dev, mask); -+ -+ return 0; -+} -+ -+/********************************************************************* -+ * -+ * This routine disables all traffic on the adapter by issuing a -+ * global reset on the MAC. -+ * -+ **********************************************************************/ -+static void -+dpaa2_dev_stop(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ struct rte_eth_link link; -+ -+ dev->data->dev_link.link_status = 0; -+ -+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in disabling dpni %d device\n", priv->hw_id); -+ return; -+ } -+ -+ /* clear the recorded link status */ -+ memset(&link, 0, sizeof(link)); -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+} -+ -+static void -+dpaa2_dev_close(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ struct rte_eth_link link; -+ -+ /*Function is reverse of dpaa2_dev_init. -+ * It does the following: -+ * 1. Detach a DPNI from attached resources i.e. buffer pools, dpbp_id. -+ * 2. Close the DPNI device -+ * 3. Free the allocated reqources. 
-+ */ -+ -+ /* Clean the device first */ -+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpni device with" -+ "error code %d\n", ret); -+ return; -+ } -+ -+ /*Close the device at underlying layer*/ -+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure closing dpni device with" -+ "error code %d\n", ret); -+ return; -+ } -+ -+ /*Free the allocated memory for ethernet private data and dpni*/ -+ priv->hw = NULL; -+ free(dpni); -+ -+ memset(&link, 0, sizeof(link)); -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+} -+ -+static void -+dpaa2_dev_promiscuous_enable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, TRUE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_promiscuous_disable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, FALSE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to disable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_allmulticast_enable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -EINVAL; -+ } -+ -+ /* check that mtu is within the allowed range */ -+ -+ if ((mtu < ETHER_MIN_MTU) || (frame_size > ETHER_MAX_JUMBO_FRAME_LEN)) -+ return -EINVAL; -+ -+ /* Set the Max Rx frame length as 'mtu' + -+ * Maximum Ethernet header length */ -+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, -+ mtu + ETH_VLAN_HLEN); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "setting the max frame length failed"); -+ return -1; -+ } -+ if (priv->options & DPNI_OPT_IPF) { -+ ret = dpni_set_mtu(dpni, CMD_PRI_LOW, priv->token, mtu); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting the MTU failed"); -+ return -1; -+ } -+ } -+ -+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu); -+ return 0; -+} -+ -+static void -+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 
-+ struct ether_addr *addr, -+ __rte_unused uint32_t index, -+ __rte_unused uint32_t pool) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Adding the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+static void -+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, -+ uint32_t index) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct rte_eth_dev_data *data = dev->data; -+ struct ether_addr *macaddr; -+ -+ macaddr = &data->mac_addrs[index]; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, macaddr->addr_bytes); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Removing the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+static void -+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, -+ struct ether_addr *addr) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+int dpaa2_dev_get_mac_addr(struct rte_eth_dev *dev, -+ struct ether_addr *addr) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -EINVAL; -+ } -+ -+ ret = dpni_get_primary_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Getting the MAC ADDR failed"); -+ } -+ -+ return ret; -+} -+ -+/*int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+ -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+dpni_set_errors_behavior -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+*/ -+ -+static int dpaa2_timestamp_enable(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ struct dpni_buffer_layout layout; -+ int ret; -+ -+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_timestamp = TRUE; -+ -+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Enabling timestamp for Rx failed with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = 
dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW, -+ priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx-conf with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_timestamp_disable(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpni_buffer_layout layout; -+ int ret; -+ -+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_timestamp = FALSE; -+ -+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Rx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW, -+ priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx-conf with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ return ret; -+} -+ -+/* return 0 means link status changed, -1 means not changed */ -+static int -+dpaa2_dev_get_link_info(struct rte_eth_dev *dev, -+ int wait_to_complete __rte_unused) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct rte_eth_link link, old; -+ struct dpni_link_state state = {0}; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return 0; -+ } -+ memset(&old, 0, sizeof(old)); -+ rte_dpni_dev_atomic_read_link_status(dev, &old); -+ -+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); -+ if (ret < 0) { -+ PMD_DRV_LOG(ERR, "dpni_get_link_state"); -+ return 0; -+ } -+ -+ if (state.up == 0) { -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+ if (state.up == old.link_status) -+ return -1; -+ return 0; -+ } -+ link.link_status = state.up; -+ link.link_speed = state.rate; -+ -+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) -+ link.link_duplex = ETH_LINK_HALF_DUPLEX; -+ else -+ link.link_duplex = ETH_LINK_FULL_DUPLEX; -+ -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+ -+ if (link.link_status == old.link_status) -+ return -1; -+ -+ return 0; -+} -+ -+static -+void dpaa2_dev_stats_get(struct rte_eth_dev *dev, -+ struct rte_eth_stats *stats) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ int32_t retcode; -+ uint64_t value; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ if (!stats) { -+ PMD_DRV_LOG(ERR, "stats is NULL"); -+ return; -+ } -+ -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME, &value); -+ if (retcode) -+ goto error; -+ stats->ipackets = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BYTE, &value); -+ if (retcode) -+ goto error; -+ stats->ibytes = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DROP, &value); -+ if (retcode) -+ goto error; -+ stats->ierrors = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DISCARD, &value); -+ if (retcode) -+ goto error; -+ stats->ierrors = stats->ierrors + value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME, &value); -+ if (retcode) -+ goto error; -+ 
stats->opackets = value; -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_BYTE, &value); -+ if (retcode) -+ goto error; -+ stats->obytes = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME_DISCARD, &value); -+ if (retcode) -+ goto error; -+ stats->oerrors = value; -+ -+ return; -+ -+error: -+ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode); -+ return; -+}; -+ -+static -+void dpaa2_dev_stats_reset(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ int32_t retcode; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BCAST_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BCAST_BYTES, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_MCAST_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_MCAST_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DROP, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DISCARD, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME_DISCARD, 0); -+ if (retcode) -+ goto error; -+ -+ return; -+ -+error: -+ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode); -+ return; -+}; -+ -+static struct eth_dev_ops ops = { -+ .dev_configure = dpaa2_eth_dev_configure, -+ .dev_start = dpaa2_dev_start, -+ .dev_stop = dpaa2_dev_stop, -+ .dev_close = dpaa2_dev_close, -+ .promiscuous_enable = dpaa2_dev_promiscuous_enable, -+ .promiscuous_disable = dpaa2_dev_promiscuous_disable, -+ .allmulticast_enable = dpaa2_dev_allmulticast_enable, -+ .allmulticast_disable = dpaa2_dev_allmulticast_disable, -+ /* .dev_set_link_up = ixgbe_dev_set_link_up, */ -+ /* .dev_set_link_down = ixgbe_dev_set_link_down, */ -+ .link_update = dpaa2_dev_get_link_info, -+ .stats_get = dpaa2_dev_stats_get, -+ /* .xstats_get = ixgbe_dev_xstats_get, */ -+ .stats_reset = dpaa2_dev_stats_reset, -+ /* .xstats_reset = ixgbe_dev_xstats_reset, */ -+ /* .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, */ -+ .dev_infos_get = dpaa2_eth_dev_info, -+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, -+ .mtu_set = dpaa2_dev_mtu_set, -+ .vlan_filter_set = dpaa2_vlan_filter_set, -+/* .vlan_tpid_set = i40e_vlan_tpid_set, */ -+ .vlan_offload_set = dpaa2_vlan_offload_set, -+/* .vlan_strip_queue_set = i40e_vlan_strip_queue_set, */ -+/* .vlan_pvid_set = i40e_vlan_pvid_set, */ -+/* .rx_queue_start = i40e_dev_rx_queue_start, */ -+/* .rx_queue_stop = i40e_dev_rx_queue_stop, */ -+/* .tx_queue_start = i40e_dev_tx_queue_start, */ -+/* .tx_queue_stop 
= i40e_dev_tx_queue_stop, */ -+ .rx_queue_setup = dpaa2_rx_queue_setup, -+/* .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, */ -+/* .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, */ -+/* .rx_queue_release = i40e_dev_rx_queue_release, */ -+/* .rx_queue_count = i40e_dev_rx_queue_count, */ -+ .tx_queue_setup = dpaa2_tx_queue_setup, -+/* .tx_queue_release = i40e_dev_tx_queue_release, */ -+/* .dev_led_on = i40e_dev_led_on, */ -+/* .dev_led_off = i40e_dev_led_off, */ -+/* .flow_ctrl_get = i40e_flow_ctrl_get, */ -+/* .flow_ctrl_set = i40e_flow_ctrl_set, */ -+/* .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set, */ -+ .mac_addr_add = dpaa2_dev_add_mac_addr, -+ .mac_addr_remove = dpaa2_dev_remove_mac_addr, -+/* .reta_update = i40e_dev_rss_reta_update, */ -+/* .reta_query = i40e_dev_rss_reta_query, */ -+/* .rss_hash_update = i40e_dev_rss_hash_update, */ -+/* .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, */ -+/* .filter_ctrl = i40e_dev_filter_ctrl, */ -+/* .rxq_info_get = i40e_rxq_info_get, */ -+/* .txq_info_get = i40e_txq_info_get, */ -+/* .mirror_rule_set = i40e_mirror_rule_set, */ -+/* .mirror_rule_reset = i40e_mirror_rule_reset, */ -+ .timesync_enable = dpaa2_timestamp_enable, -+ .timesync_disable = dpaa2_timestamp_disable, -+/* .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, */ -+/* .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, */ -+/* .get_dcb_info = i40e_dev_get_dcb_info, */ -+/* .timesync_adjust_time = i40e_timesync_adjust_time, */ -+/* .timesync_read_time = i40e_timesync_read_time, */ -+/* .timesync_write_time = i40e_timesync_write_time, */ -+/* .get_reg_length = i40e_get_reg_length, */ -+/* .get_reg = i40e_get_regs, */ -+/* .get_eeprom_length = i40e_get_eeprom_length, */ -+/* .get_eeprom = i40e_get_eeprom, */ -+ .mac_addr_set = dpaa2_dev_set_mac_addr, -+}; -+ -+static int -+dpaa2_dev_init(struct rte_eth_dev *eth_dev) -+{ -+ struct rte_eth_dev_data *data = eth_dev->data; -+ struct fsl_mc_io *dpni_dev; -+ struct dpni_attr attr; -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct dpni_buffer_layout layout; -+ int i, ret, hw_id = eth_dev->pci_dev->addr.devid; -+ struct dpni_extended_cfg *ext_cfg = NULL; -+ int tot_size; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io)); -+ if (!dpni_dev) { -+ PMD_DRV_LOG(ERR, "malloc failed for dpni device\n"); -+ return -1; -+ } -+ -+ dpni_dev->regs = mcp_ptr_list[0]; -+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in opening dpni@%d device with" -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ /* Clean the device first */ -+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpni@%d device with" -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ ext_cfg = (struct dpni_extended_cfg *)rte_malloc(NULL, 256, -+ RTE_CACHE_LINE_SIZE); -+ if (!ext_cfg) { -+ PMD_DRV_LOG(ERR, "No data memory\n"); -+ return -1; -+ } -+ attr.ext_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(ext_cfg)); -+ -+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in getting dpni@%d attribute, " -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ priv->num_tc = attr.max_tcs; -+ for (i = 0; i < attr.max_tcs; i++) { -+ priv->num_dist_per_tc[i] = ext_cfg->tc_cfg[i].max_dist; -+ priv->nb_rx_queues += priv->num_dist_per_tc[i]; -+ /* todo - currently we only support one TC index 
in RX side */ -+ break; -+ } -+ if (attr.max_tcs == 1) -+ priv->nb_tx_queues = attr.max_senders; -+ else -+ priv->nb_tx_queues = attr.max_tcs; -+ PMD_DRV_LOG(INFO, "num_tc %d\n", priv->num_tc); -+ PMD_DRV_LOG(INFO, "nb_rx_queues %d\n", priv->nb_rx_queues); -+ -+ eth_dev->data->nb_rx_queues = priv->nb_rx_queues; -+ eth_dev->data->nb_tx_queues = priv->nb_tx_queues; -+ -+ priv->hw = dpni_dev; -+ priv->hw_id = hw_id; -+ priv->options = attr.options; -+ -+ priv->max_unicast_filters = attr.max_unicast_filters; -+ priv->max_multicast_filters = attr.max_multicast_filters; -+ -+ if (attr.options & DPNI_OPT_VLAN_FILTER) -+ priv->max_vlan_filters = attr.max_vlan_filters; -+ else -+ priv->max_vlan_filters = 0; -+ -+ ret = dpaa2_alloc_rx_tx_queues(eth_dev); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); -+ return -1; -+ } -+ -+ data->mac_addrs = (struct ether_addr *)malloc(sizeof(struct ether_addr)); -+ -+ /* Allocate memory for storing MAC addresses */ -+ eth_dev->data->mac_addrs = rte_zmalloc("dpni", -+ ETHER_ADDR_LEN * attr.max_unicast_filters, 0); -+ if (eth_dev->data->mac_addrs == NULL) { -+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to " -+ "store MAC addresses", -+ ETHER_ADDR_LEN * attr.max_unicast_filters); -+ return -ENOMEM; -+ } -+ -+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, -+ priv->token, -+ (uint8_t *)(data->mac_addrs[0].addr_bytes)); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "DPNI get mac address failed:" -+ " Error Code = %d\n", ret); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "Adding Broadcast Address...\n"); -+ memset(data->mac_addrs[1].addr_bytes, 0xff, ETH_ADDR_LEN); -+ ret = dpni_add_mac_addr(dpni_dev, CMD_PRI_LOW, -+ priv->token, -+ (uint8_t *)(data->mac_addrs[1].addr_bytes)); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "DPNI set broadcast mac address failed:" -+ " Error Code = %0x\n", ret); -+ return -1; -+ } -+ -+ /* ... rx buffer layout ... */ -+ /*Check alignment for buffer layouts first*/ -+ tot_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES/* dummy */ + -+ 128 /*RTE_MUF */ + (128+DPAA2_RES)/*VLIB*/ + RTE_PKTMBUF_HEADROOM; -+ tot_size = DPAA2_ALIGN_ROUNDUP(tot_size, -+ DPAA2_PACKET_LAYOUT_ALIGN); -+ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | -+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | -+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; -+ -+ layout.pass_frame_status = 1; -+ layout.data_head_room = -+ (tot_size - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION)); -+ layout.private_data_size = DPAA2_FD_PTA_SIZE; -+ layout.pass_parser_result = 1; -+ -+ ret = dpni_set_rx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, -+ &layout); -+ if (ret) { -+ printf("Err(%d) in setting rx buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* ... tx buffer layout ... */ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; -+ layout.pass_frame_status = 1; -+ ret = dpni_set_tx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ printf("Error (%d) in setting tx buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* ... tx-conf and error buffer layout ... 
*/ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; -+ layout.pass_frame_status = 1; -+ ret = dpni_set_tx_conf_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ printf("Error (%d) in setting tx-conf buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* TODO - Set the MTU if required */ -+ -+ eth_dev->dev_ops = &ops; -+ eth_dev->rx_pkt_burst = eth_dpaa2_prefetch_rx;/*eth_dpaa2_rx;*/ -+ eth_dev->tx_pkt_burst = eth_dpaa2_tx; -+ -+ rte_free(ext_cfg); -+ -+ return 0; -+} -+ -+static struct eth_driver rte_dpaa2_dpni = { -+ { -+ .name = "rte_dpaa2_dpni", -+ .id_table = pci_id_dpaa2_map, -+ }, -+ .eth_dev_init = dpaa2_dev_init, -+ .dev_private_size = sizeof(struct dpaa2_dev_priv), -+}; -+ -+static int -+rte_pmd_dpaa2_devinit( -+ const char *name __rte_unused, -+ const char *params __rte_unused) -+{ -+ printf("Initializing dpaa2_pmd for %s\n", name); -+ rte_eth_driver_register(&rte_dpaa2_dpni); -+ -+ return 0; -+} -+ -+static struct rte_driver pmd_dpaa2_drv = { -+ .name = "dpaa2_pmd", -+ .type = PMD_PDEV, -+ .init = rte_pmd_dpaa2_devinit, -+}; -+ -+PMD_REGISTER_DRIVER(pmd_dpaa2_drv); -diff --git a/drivers/net/dpaa2/rte_eth_dpni_annot.h b/drivers/net/dpaa2/rte_eth_dpni_annot.h -new file mode 100644 -index 0000000..00fac9b ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpni_annot.h -@@ -0,0 +1,311 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+/** -+ * @file -+ * -+ * DPNI packet parse results - implementation internal -+ */ -+ -+#ifndef RTE_ETH_DPNI_ANNOT_H_ -+#define RTE_ETH_DPNI_ANNOT_H_ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* Annotation valid bits in FD FRC */ -+#define DPAA2_FD_FRC_FASV 0x8000 -+#define DPAA2_FD_FRC_FAEADV 0x4000 -+#define DPAA2_FD_FRC_FAPRV 0x2000 -+#define DPAA2_FD_FRC_FAIADV 0x1000 -+#define DPAA2_FD_FRC_FASWOV 0x0800 -+#define DPAA2_FD_FRC_FAICFDV 0x0400 -+ -+/* Annotation bits in FD CTRL */ -+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ -+#define DPAA2_FD_CTRL_PTA 0x00800000 -+#define DPAA2_FD_CTRL_PTV1 0x00400000 -+ -+/* Frame annotation status */ -+struct dpaa2_fas { -+ uint8_t reserved; -+ uint8_t ppid; -+ __le16 ifpid; -+ __le32 status; -+} __packed; -+ -+/** -+ * Internal Packet annotation header -+ */ -+struct pkt_annotation { -+ /**< word1: Frame Annotation Status (8 bytes)*/ -+ uint64_t word1; -+ /**< word2: Time Stamp (8 bytes)*/ -+ uint64_t word2; -+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/ -+ uint64_t word3; -+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */ -+ uint64_t word4; -+ /**< word5: -+ ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset + -+ LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n + -+ LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) -+ */ -+ uint64_t word5; -+ /**< word6: -+ PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1 -+ + IPOffset_norMInEncapO + GREOffset + L4Offset + -+ GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) -+ */ -+ uint64_t word6; -+ /**< word7: -+ RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + IPv6FragOffset + -+ GrossRunningSum + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes) -+ */ -+ uint64_t word7; -+ /**< word8: -+ ParseErrorcode + Soft Parsing Context (1 + 7 bytes) -+ */ -+ uint64_t word8; /**< Layer 4 length */ -+}; -+ -+/** -+ * Internal Macros to get/set Packet annotation header -+ */ -+ -+/** General Macro to define a particular bit position*/ -+#define BIT_POS(x) ((uint64_t)1 << ((x))) -+/** Set a bit in the variable */ -+#define BIT_SET_AT_POS(var, pos) (var |= pos) -+/** Reset the bit in the variable */ -+#define BIT_RESET_AT_POS(var, pos) (var &= ~(pos)) -+/** Check the bit is set in the variable */ -+#define BIT_ISSET_AT_POS(var, pos) ((var & pos) ? 
1 : 0) -+/** -+ * Macrso to define bit position in word3 -+ */ -+#define NEXT_HDR(var) ((uint64_t)var & 0xFFFF000000000000) -+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16) -+#define FAF_EXTN_RESERVED(var) ((uint64_t)var & 0x00007FFF00000000) -+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)var & 0x00000000FF000000) -+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23) -+#define PARSING_ERROR BIT_POS(22) -+#define L2_ETH_MAC_PRESENT BIT_POS(21) -+#define L2_ETH_MAC_UNICAST BIT_POS(20) -+#define L2_ETH_MAC_MULTICAST BIT_POS(19) -+#define L2_ETH_MAC_BROADCAST BIT_POS(18) -+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17) -+#define L2_ETH_FCOE_PRESENT BIT_POS(16) -+#define L2_ETH_FIP_PRESENT BIT_POS(15) -+#define L2_ETH_PARSING_ERROR BIT_POS(14) -+#define L2_LLC_SNAP_PRESENT BIT_POS(13) -+#define L2_UNKNOWN_LLC_OUI BIT_POS(12) -+#define L2_LLC_SNAP_ERROR BIT_POS(11) -+#define L2_VLAN_1_PRESENT BIT_POS(10) -+#define L2_VLAN_N_PRESENT BIT_POS(9) -+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8) -+#define L2_VLAN_PARSING_ERROR BIT_POS(7) -+#define L2_PPPOE_PPP_PRESENT BIT_POS(6) -+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5) -+#define L2_MPLS_1_PRESENT BIT_POS(4) -+#define L2_MPLS_N_PRESENT BIT_POS(3) -+#define L2_MPLS_PARSING_ERROR BIT_POS(2) -+#define L2_ARP_PRESENT BIT_POS(1) -+#define L2_ARP_PARSING_ERROR BIT_POS(0) -+/** -+ * Macrso to define bit position in word4 -+ */ -+#define L2_UNKNOWN_PROTOCOL BIT_POS(63) -+#define L2_SOFT_PARSING_ERROR BIT_POS(62) -+#define L3_IPV4_1_PRESENT BIT_POS(61) -+#define L3_IPV4_1_UNICAST BIT_POS(60) -+#define L3_IPV4_1_MULTICAST BIT_POS(59) -+#define L3_IPV4_1_BROADCAST BIT_POS(58) -+#define L3_IPV4_N_PRESENT BIT_POS(57) -+#define L3_IPV4_N_UNICAST BIT_POS(56) -+#define L3_IPV4_N_MULTICAST BIT_POS(55) -+#define L3_IPV4_N_BROADCAST BIT_POS(54) -+#define L3_IPV6_1_PRESENT BIT_POS(53) -+#define L3_IPV6_1_UNICAST BIT_POS(52) -+#define L3_IPV6_1_MULTICAST BIT_POS(51) -+#define L3_IPV6_N_PRESENT BIT_POS(50) -+#define L3_IPV6_N_UNICAST BIT_POS(49) -+#define L3_IPV6_N_MULTICAST BIT_POS(48) -+#define L3_IP_1_OPT_PRESENT BIT_POS(47) -+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46) -+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45) -+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44) -+#define L3_IP_1_PARSING_ERROR BIT_POS(43) -+#define L3_IP_N_OPT_PRESENT BIT_POS(42) -+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41) -+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40) -+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39) -+#define L3_PROTO_ICMP_PRESENT BIT_POS(38) -+#define L3_PROTO_IGMP_PRESENT BIT_POS(37) -+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36) -+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35) -+#define L3_IP_N_PARSING_ERROR BIT_POS(34) -+#define L3_MIN_ENCAP_PRESENT BIT_POS(33) -+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32) -+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31) -+#define L3_PROTO_GRE_PRESENT BIT_POS(30) -+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29) -+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28) -+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27) -+#define L3_SOFT_PARSING_ERROR BIT_POS(26) -+#define L3_PROTO_UDP_PRESENT BIT_POS(25) -+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24) -+#define L3_PROTO_TCP_PRESENT BIT_POS(23) -+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22) -+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21) -+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20) -+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19) -+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18) -+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17) -+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16) -+#define 
L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15) -+#define L3_PROTO_SCTP_PRESENT BIT_POS(14) -+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13) -+#define L3_PROTO_DCCP_PRESENT BIT_POS(12) -+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11) -+#define L4_UNKNOWN_PROTOCOL BIT_POS(10) -+#define L4_SOFT_PARSING_ERROR BIT_POS(9) -+#define L3_PROTO_GTP_PRESENT BIT_POS(8) -+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7) -+#define L3_PROTO_ESP_PRESENT BIT_POS(6) -+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5) -+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4) -+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3) -+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2) -+#define L5_SOFT_PARSING_ERROR BIT_POS(1) -+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0) -+ -+/** -+ * Macros to get values in word5 -+ */ -+#define SHIM_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000) -+#define SHIM_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000) -+#define IP_PID_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000) -+#define ETH_OFFSET(var) ((uint64_t)var & 0x000000FF00000000) -+#define LLC_SNAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000) -+#define VLAN_TCI_OFFSET_1(var) ((uint64_t)var & 0x0000000000FF0000) -+#define VLAN_TCI_OFFSET_N(var) ((uint64_t)var & 0x000000000000FF00) -+#define LAST_ETYPE_OFFSET(var) ((uint64_t)var & 0x00000000000000FF) -+ -+/** -+ * Macros to get values in word6 -+ */ -+#define PPPOE_OFFSET(var) ((uint64_t)var & 0xFF00000000000000) -+#define MPLS_OFFSET_1(var) ((uint64_t)var & 0x00FF000000000000) -+#define MPLS_OFFSET_N(var) ((uint64_t)var & 0x0000FF0000000000) -+#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)var & 0x000000FF00000000) -+#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000) -+#define GRE_OFFSET(var) ((uint64_t)var & 0x0000000000FF0000) -+#define L4_OFFSET(var) ((uint64_t)var & 0x000000000000FF00) -+#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)var & 0x00000000000000FF) -+ -+/** -+ * Macros to get values in word7 -+ */ -+#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000) -+#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000) -+#define NEXT_HDR_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000) -+#define IPV6_FRAG_OFFSET(var) ((uint64_t)var & 0x000000FF00000000) -+#define GROSS_RUNNING_SUM(var) ((uint64_t)var & 0x00000000FFFF0000) -+#define RUNNING_SUM(var) ((uint64_t)var & 0x000000000000FFFF) -+ -+/** -+ * Macros to get values in word8 -+ */ -+#define PARSE_ERROR_CODE(var) ((uint64_t)var & 0xFF00000000000000) -+#define SOFT_PARSING_CONTEXT(var) ((uint64_t)var & 0x00FFFFFFFFFFFFFF) -+ -+/* Debug frame, otherwise supposed to be discarded */ -+#define DPAA2_ETH_FAS_DISC 0x80000000 -+/* MACSEC frame */ -+#define DPAA2_ETH_FAS_MS 0x40000000 -+#define DPAA2_ETH_FAS_PTP 0x08000000 -+/* Ethernet multicast frame */ -+#define DPAA2_ETH_FAS_MC 0x04000000 -+/* Ethernet broadcast frame */ -+#define DPAA2_ETH_FAS_BC 0x02000000 -+#define DPAA2_ETH_FAS_KSE 0x00040000 -+#define DPAA2_ETH_FAS_EOFHE 0x00020000 -+#define DPAA2_ETH_FAS_MNLE 0x00010000 -+#define DPAA2_ETH_FAS_TIDE 0x00008000 -+#define DPAA2_ETH_FAS_PIEE 0x00004000 -+/* Frame length error */ -+#define DPAA2_ETH_FAS_FLE 0x00002000 -+/* Frame physical error; our favourite pastime */ -+#define DPAA2_ETH_FAS_FPE 0x00001000 -+#define DPAA2_ETH_FAS_PTE 0x00000080 -+#define DPAA2_ETH_FAS_ISP 0x00000040 -+#define DPAA2_ETH_FAS_PHE 0x00000020 -+#define DPAA2_ETH_FAS_BLE 0x00000010 -+/* L3 csum validation performed */ -+#define DPAA2_ETH_FAS_L3CV 0x00000008 -+/* L3 csum error */ -+#define 
DPAA2_ETH_FAS_L3CE 0x00000004 -+/* L4 csum validation performed */ -+#define DPAA2_ETH_FAS_L4CV 0x00000002 -+/* L4 csum error */ -+#define DPAA2_ETH_FAS_L4CE 0x00000001 -+ -+/* These bits always signal errors */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE | \ -+ DPAA2_ETH_FAS_PIEE | \ -+ DPAA2_ETH_FAS_FLE | \ -+ DPAA2_ETH_FAS_FPE | \ -+ DPAA2_ETH_FAS_PTE | \ -+ DPAA2_ETH_FAS_ISP | \ -+ DPAA2_ETH_FAS_PHE | \ -+ DPAA2_ETH_FAS_BLE | \ -+ DPAA2_ETH_FAS_L3CE | \ -+ DPAA2_ETH_FAS_L4CE) -+/* Unsupported features in the ingress */ -+#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS -+/* Tx errors */ -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE) -+ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif -diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map -new file mode 100644 -index 0000000..349c6e1 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map -@@ -0,0 +1,4 @@ -+DPDK_16.04 { -+ -+ local: *; -+}; -diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h -index 2342fa1..8f27836 100644 ---- a/lib/librte_eal/common/eal_private.h -+++ b/lib/librte_eal/common/eal_private.h -@@ -328,4 +328,16 @@ int rte_eal_hugepage_init(void); - */ - int rte_eal_hugepage_attach(void); - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+/** -+ * Initialize any soc init related functions if any before thread creation -+ */ -+int rte_eal_soc_pre_init(void); -+ -+/** -+ * Initialize any soc init related functions if any after thread creation -+ */ -+int rte_eal_soc_post_init(void); -+#endif -+ - #endif /* _EAL_PRIVATE_H_ */ -diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile -index e109361..abcd02c 100644 ---- a/lib/librte_eal/linuxapp/eal/Makefile -+++ b/lib/librte_eal/linuxapp/eal/Makefile -@@ -47,6 +47,13 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include - CFLAGS += -I$(RTE_SDK)/lib/librte_ring - CFLAGS += -I$(RTE_SDK)/lib/librte_mempool - CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -+CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver -+endif - CFLAGS += $(WERROR_FLAGS) -O3 - - LDLIBS += -ldl -@@ -72,6 +79,10 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_interrupts.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_alarm.c -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_soc.c -+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_fsl_mc.c -+endif - ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y) - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_ivshmem.c - endif -diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c -index 8aafd51..b2327c7 100644 ---- a/lib/librte_eal/linuxapp/eal/eal.c -+++ b/lib/librte_eal/linuxapp/eal/eal.c -@@ -805,6 +805,11 @@ rte_eal_init(int argc, char **argv) - if (rte_eal_tailqs_init() < 0) - rte_panic("Cannot init tail queues for objects\n"); - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (rte_eal_soc_pre_init() < 0) -+ rte_panic("Cannot pre init soc\n"); -+#endif -+ - #ifdef RTE_LIBRTE_IVSHMEM - if 
(rte_eal_ivshmem_obj_init() < 0) - rte_panic("Cannot init IVSHMEM objects\n"); -@@ -874,6 +879,11 @@ rte_eal_init(int argc, char **argv) - rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER); - rte_eal_mp_wait_lcore(); - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (rte_eal_soc_post_init() < 0) -+ rte_panic("Cannot post init soc\n"); -+#endif -+ - /* Probe & Initialize PCI devices */ - if (rte_eal_pci_probe()) - rte_panic("Cannot probe PCI\n"); -diff --git a/lib/librte_eal/linuxapp/eal/eal_soc.c b/lib/librte_eal/linuxapp/eal/eal_soc.c -new file mode 100644 -index 0000000..32ae172 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_soc.c -@@ -0,0 +1,84 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc or the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "eal_private.h" -+ -+#ifdef RTE_LIBRTE_DPAA2_PMD -+#include "eal_vfio_fsl_mc.h" -+#endif -+ -+ -+#if (defined RTE_LIBRTE_DPAA_PMD) -+extern int usdpaa_pre_rte_eal_init(void); -+extern int usdpaa_post_rte_eal_init(void); -+#endif -+ -+ -+/* Initialize any soc init related functions if any before thread creation*/ -+int -+rte_eal_soc_pre_init(void) -+{ -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (rte_eal_dpaa2_init() < 0) -+ RTE_LOG(WARNING, EAL, "Cannot init FSL_MC SCAN \n"); -+#endif -+#if (defined RTE_LIBRTE_DPAA_PMD) -+ if (usdpaa_pre_rte_eal_init()) -+ RTE_LOG(WARNING, EAL, "Cannot init FSL_DPAA \n"); -+#endif -+ return 0; -+} -+ -+/* Initialize any soc init related functions if any after thread creation*/ -+int -+rte_eal_soc_post_init(void) -+{ -+#if (defined RTE_LIBRTE_DPAA_PMD) -+ if (usdpaa_post_rte_eal_init()) { -+ RTE_LOG(WARNING, EAL, "dpaa1: usdpaa portal init failed\n"); -+ } -+#endif -+ return 0; -+} -+ -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c -new file mode 100644 -index 0000000..c71d8d6 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c -@@ -0,0 +1,653 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "eal_vfio.h" -+ -+#include -+ -+#include "eal_vfio_fsl_mc.h" -+ -+#include "rte_pci_dev_ids.h" -+#include "eal_filesystem.h" -+#include "eal_private.h" -+ -+#ifndef VFIO_MAX_GROUPS -+#define VFIO_MAX_GROUPS 64 -+#endif -+ -+//#define DPAA2_STAGE2_STASHING -+ -+/** Pathname of FSL-MC devices directory. 
*/ -+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices" -+ -+/* Number of VFIO containers & groups with in */ -+static struct vfio_group vfio_groups[VFIO_MAX_GRP]; -+static struct vfio_container vfio_containers[VFIO_MAX_CONTAINERS]; -+static char *ls2bus_container; -+static int container_device_fd; -+static uint32_t *msi_intr_vaddr; -+void *(*mcp_ptr_list); -+static uint32_t mcp_id; -+ -+static int vfio_connect_container(struct vfio_group *vfio_group) -+{ -+ struct vfio_container *container; -+ int i, fd, ret; -+ -+ /* Try connecting to vfio container already created */ -+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) { -+ container = &vfio_containers[i]; -+ if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { -+ RTE_LOG(ERR, EAL, "Container pre-exists with FD[0x%x]" -+ " for this group\n", container->fd); -+ vfio_group->container = container; -+ return 0; -+ } -+ } -+ -+ /* Opens main vfio file descriptor which represents the "container" */ -+ fd = open("/dev/vfio/vfio", O_RDWR); -+ if (fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: failed to open /dev/vfio/vfio\n"); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_GET_API_VERSION); -+ if (ret != VFIO_API_VERSION) { -+ RTE_LOG(ERR, EAL, "vfio: supported vfio version: %d, " -+ "reported version: %d", VFIO_API_VERSION, ret); -+ close(fd); -+ return -EINVAL; -+ } -+#ifndef DPAA2_STAGE2_STASHING -+ /* Check whether support for SMMU type IOMMU prresent or not */ -+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) { -+ /* Connect group to container */ -+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n"); -+ close(fd); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu for container:\n"); -+ close(fd); -+ return -errno; -+ } -+ } else { -+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU\n"); -+ close(fd); -+ return -EINVAL; -+ } -+#else -+ /* Check whether support for SMMU type IOMMU stage 2 present or not */ -+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_NESTING_IOMMU)) { -+ /* Connect group to container */ -+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n"); -+ close(fd); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_NESTING_IOMMU); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu-2 for container:\n"); -+ close(fd); -+ return -errno; -+ } -+ } else { -+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU-2\n"); -+ close(fd); -+ return -EINVAL; -+ } -+#endif -+ container = NULL; -+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) { -+ if (vfio_containers[i].used) -+ continue; -+ RTE_LOG(ERR, EAL, "DPAA2-Unused container at index %d\n", i); -+ container = &vfio_containers[i]; -+ } -+ if (!container) { -+ RTE_LOG(ERR, EAL, "vfio error: No Free Container Found\n"); -+ close(fd); -+ return -ENOMEM; -+ } -+ -+ container->used = 1; -+ container->fd = fd; -+ container->group_list[container->index] = vfio_group; -+ vfio_group->container = container; -+ container->index++; -+ return 0; -+} -+ -+static int vfio_map_irq_region(struct vfio_group *group) -+{ -+ int ret; -+ unsigned long *vaddr = NULL; -+ struct vfio_iommu_type1_dma_map map = { -+ .argsz = sizeof(map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ .vaddr = 0x6030000, -+ .iova = 0x6030000, -+ .size = 0x1000, -+ }; -+ -+ vaddr = (unsigned long *)mmap(NULL, 
0x1000, PROT_WRITE | -+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000); -+ if (vaddr == MAP_FAILED) { -+ RTE_LOG(ERR, EAL, " mapping GITS region (errno = %d)", errno); -+ return -errno; -+ } -+ -+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64); -+ map.vaddr = (unsigned long)vaddr; -+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map); -+ if (ret == 0) -+ return 0; -+ -+ RTE_LOG(ERR, EAL, "vfio_map_irq_region fails (errno = %d)", errno); -+ return -errno; -+} -+ -+int vfio_dmamap_mem_region(uint64_t vaddr, -+ uint64_t iova, -+ uint64_t size) -+{ -+ struct vfio_group *group; -+ struct vfio_iommu_type1_dma_map dma_map = { -+ .argsz = sizeof(dma_map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ }; -+ -+ dma_map.vaddr = vaddr; -+ dma_map.size = size; -+ dma_map.iova = iova; -+ -+ /* SET DMA MAP for IOMMU */ -+ group = &vfio_groups[0]; -+ if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) { -+ /* todo changes these to RTE_LOG */ -+ RTE_LOG(ERR, EAL, "SWP: VFIO_IOMMU_MAP_DMA API Error %d.\n", errno); -+ return -1; -+ } -+ return 0; -+} -+ -+static int32_t setup_dmamap(void) -+{ -+ int ret; -+ struct vfio_group *group; -+ struct vfio_iommu_type1_dma_map dma_map = { -+ .argsz = sizeof(struct vfio_iommu_type1_dma_map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ }; -+ -+ int i; -+ const struct rte_memseg *memseg; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG; i++) { -+ memseg = rte_eal_get_physmem_layout(); -+ if (memseg == NULL) { -+ RTE_LOG(ERR, EAL, -+ "\nError Cannot get physical layout\n"); -+ return -ENODEV; -+ } -+ -+ if (memseg[i].addr == NULL && memseg[i].len == 0) { -+ break; -+ } -+ -+ dma_map.size = memseg[i].len; -+ dma_map.vaddr = memseg[i].addr_64; -+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -+ dma_map.iova = memseg[i].phys_addr; -+#else -+ dma_map.iova = dma_map.vaddr; -+#endif -+ -+ /* SET DMA MAP for IOMMU */ -+ group = &vfio_groups[0]; -+ -+ printf("-->Initial SHM Virtual ADDR %llX\n", dma_map.vaddr); -+ printf("-----> DMA size 0x%llX\n", dma_map.size); -+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); -+ if (ret) { -+ RTE_LOG(ERR, EAL, -+ "\nErr: VFIO_IOMMU_MAP_DMA API Error %d.\n", -+ errno); -+ return ret; -+ } -+ printf("-----> dma_map.vaddr = 0x%llX\n", dma_map.vaddr); -+ } -+ -+ /* TODO - This is a W.A. as VFIO currently does not add the mapping of -+ the interrupt region to SMMU. This should be removed once the -+ support is added in the Kernel. 
-+ */ -+ vfio_map_irq_region(group); -+ -+ return 0; -+} -+ -+static int vfio_set_group(struct vfio_group *group, int groupid) -+{ -+ char path[PATH_MAX]; -+ struct vfio_group_status status = { .argsz = sizeof(status) }; -+ -+ /* Open the VFIO file corresponding to the IOMMU group */ -+ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); -+ -+ group->fd = open(path, O_RDWR); -+ if (group->fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error opening %s\n", path); -+ return -1; -+ } -+ -+ /* Test & Verify that group is VIABLE & AVAILABLE */ -+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { -+ RTE_LOG(ERR, EAL, "vfio: error getting group status\n"); -+ close(group->fd); -+ return -1; -+ } -+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { -+ RTE_LOG(ERR, EAL, "vfio: group not viable\n"); -+ close(group->fd); -+ return -1; -+ } -+ /* Since Group is VIABLE, Store the groupid */ -+ group->groupid = groupid; -+ -+ /* Now connect this IOMMU group to given container */ -+ if (vfio_connect_container(group)) { -+ RTE_LOG(ERR, EAL, -+ "vfio: error sonnecting container with group %d\n", -+ groupid); -+ close(group->fd); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int32_t setup_vfio_grp(char *vfio_container) -+{ -+ char path[PATH_MAX]; -+ char iommu_group_path[PATH_MAX], *group_name; -+ struct vfio_group *group = NULL; -+ struct stat st; -+ int groupid; -+ int ret, len, i; -+ -+ printf("\tProcessing Container = %s\n", vfio_container); -+ sprintf(path, "/sys/bus/fsl-mc/devices/%s", vfio_container); -+ /* Check whether ls-container exists or not */ -+ printf("\tcontainer device path = %s\n", path); -+ if (stat(path, &st) < 0) { -+ RTE_LOG(ERR, EAL, "vfio: Error (%d) getting FSL-MC device (%s)\n", -+ errno, path); -+ return -errno; -+ } -+ -+ /* DPRC container exists. 
NOw checkout the IOMMU Group */ -+ strncat(path, "/iommu_group", sizeof(path) - strlen(path) - 1); -+ -+ len = readlink(path, iommu_group_path, PATH_MAX); -+ if (len == -1) { -+ RTE_LOG(ERR, EAL, "\tvfio: error no iommu_group for device\n"); -+ RTE_LOG(ERR, EAL, "\t%s: len = %d, errno = %d\n", -+ path, len, errno); -+ return -errno; -+ } -+ -+ iommu_group_path[len] = 0; -+ group_name = basename(iommu_group_path); -+ if (sscanf(group_name, "%d", &groupid) != 1) { -+ RTE_LOG(ERR, EAL, "\tvfio: error reading %s: %m\n", path); -+ return -errno; -+ } -+ -+ RTE_LOG(INFO, EAL, "\tvfio: iommu group id = %d\n", groupid); -+ -+ /* Check if group already exists */ -+ for (i = 0; i < VFIO_MAX_GRP; i++) { -+ group = &vfio_groups[i]; -+ if (group->groupid == groupid) { -+ RTE_LOG(ERR, EAL, "groupid already exists %d\n", groupid); -+ return 0; -+ } -+ } -+ -+ if (vfio_set_group(group, groupid)) { -+ RTE_LOG(ERR, EAL, "group setup failure - %d\n", groupid); -+ return -ENODEV; -+ } -+ -+ /* Get Device information */ -+ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, vfio_container); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "\tvfio: error getting device %s fd from group %d\n", -+ vfio_container, group->groupid); -+ return ret; -+ } -+ container_device_fd = ret; -+ RTE_LOG(INFO, EAL, "vfio: Container FD is [0x%X]\n", container_device_fd); -+ /* Set up SMMU */ -+ ret = setup_dmamap(); -+ if (ret) { -+ RTE_LOG(ERR, EAL, ": Setting dma map\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+ -+static int64_t vfio_map_mcp_obj(struct vfio_group *group, char *mcp_obj) -+{ -+ int64_t v_addr = (int64_t)MAP_FAILED; -+ int32_t ret, mc_fd; -+ -+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; -+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; -+ -+ /* getting the mcp object's fd*/ -+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj); -+ if (mc_fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting device %s fd from group %d\n", -+ mcp_obj, group->fd); -+ return v_addr; -+ } -+ -+ /* getting device info*/ -+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting DEVICE_INFO\n"); -+ goto MC_FAILURE; -+ } -+ -+ /* getting device region info*/ -+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting REGION_INFO\n"); -+ goto MC_FAILURE; -+ } -+ -+ RTE_LOG(INFO, EAL, "region offset = %llx , region size = %llx\n", -+ reg_info.offset, reg_info.size); -+ -+ v_addr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ mc_fd, reg_info.offset); -+ -+MC_FAILURE: -+ close(mc_fd); -+ -+ return v_addr; -+} -+ -+/* Following function shall fetch total available list of MC devices -+ * from VFIO container & populate private list of devices and other -+ * data structures -+ */ -+static int vfio_process_group_devices(void) -+{ -+ struct vfio_device *vdev; -+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; -+ char *temp_obj, *object_type, *mcp_obj, *dev_name; -+ int32_t object_id, i, dev_fd, ret; -+ DIR *d; -+ struct dirent *dir; -+ char path[PATH_MAX]; -+ int64_t v_addr; -+ int ndev_count; -+ struct vfio_group *group = &vfio_groups[0]; -+ -+ sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid); -+ -+ d = opendir(path); -+ if (!d) { -+ RTE_LOG(ERR, EAL,"Unable to open directory %s\n", path); -+ return -1; -+ } -+ -+ /*Counting the number of devices in a group and getting the mcp ID*/ -+ ndev_count = 0; -+ mcp_obj = NULL; 
-+ while ((dir = readdir(d)) != NULL) { -+ if (dir->d_type == DT_LNK) { -+ ndev_count++; -+ if (!strncmp("dpmcp", dir->d_name, 5)) { -+ if (mcp_obj) -+ free(mcp_obj); -+ mcp_obj = malloc(sizeof(dir->d_name)); -+ if (!mcp_obj) { -+ RTE_LOG(ERR, EAL, -+ "Unable to allocate memory\n"); -+ return -ENOMEM; -+ } -+ strcpy(mcp_obj, dir->d_name); -+ temp_obj = strtok(dir->d_name, "."); -+ temp_obj = strtok(NULL, "."); -+ sscanf(temp_obj, "%d", &mcp_id); -+ } -+ } -+ } -+ closedir(d); -+ -+ if (!mcp_obj) { -+ RTE_LOG(ERR, EAL,"MCP Object not Found\n"); -+ return -ENODEV; -+ } -+ RTE_LOG(INFO, EAL,"Total devices in conatiner = %d, MCP ID = %d\n", -+ ndev_count, mcp_id); -+ -+ /* Allocate the memory depends upon number of objects in a group*/ -+ group->vfio_device = (struct vfio_device *)malloc(ndev_count * sizeof(struct vfio_device)); -+ if (!(group->vfio_device)) { -+ RTE_LOG(ERR, EAL,"Unable to allocate memory\n"); -+ free(mcp_obj); -+ return -ENOMEM; -+ } -+ -+ /* Allocate memory for MC Portal list */ -+ mcp_ptr_list = malloc(sizeof(void *) * 1); -+ if (!mcp_ptr_list) { -+ RTE_LOG(ERR, EAL, "NO Memory!\n"); -+ free(mcp_obj); -+ goto FAILURE; -+ } -+ -+ v_addr = vfio_map_mcp_obj(group, mcp_obj); -+ free(mcp_obj); -+ if (v_addr == (int64_t)MAP_FAILED) { -+ RTE_LOG(ERR, EAL, "mapping region (errno = %d)\n", errno); -+ goto FAILURE; -+ } -+ -+ RTE_LOG(INFO, EAL, "MC has VIR_ADD = 0x%ld\n", v_addr); -+ -+ mcp_ptr_list[0] = (void *)v_addr; -+ -+ d = opendir(path); -+ if (!d) { -+ RTE_LOG(ERR, EAL, "Directory %s not able to open\n", path); -+ goto FAILURE; -+ } -+ -+ i = 0; -+ printf("\nDPAA2 - Parsing MC Device Objects:\n"); -+ /* Parsing each object and initiating them*/ -+ while ((dir = readdir(d)) != NULL) { -+ if (dir->d_type != DT_LNK) -+ continue; -+ if (!strncmp("dprc", dir->d_name, 4) || !strncmp("dpmcp", dir->d_name, 5)) -+ continue; -+ dev_name = malloc(sizeof(dir->d_name)); -+ if (!dev_name) { -+ RTE_LOG(ERR, EAL, "Unable to allocate memory\n"); -+ goto FAILURE; -+ } -+ strcpy(dev_name, dir->d_name); -+ object_type = strtok(dir->d_name, "."); -+ temp_obj = strtok(NULL, "."); -+ sscanf(temp_obj, "%d", &object_id); -+ RTE_LOG(INFO, EAL, "%s ", dev_name); -+ -+ /* getting the device fd*/ -+ dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name); -+ if (dev_fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio getting device %s fd from group %d\n", -+ dev_name, group->fd); -+ free(dev_name); -+ goto FAILURE; -+ } -+ -+ free(dev_name); -+ vdev = &group->vfio_device[group->object_index++]; -+ vdev->fd = dev_fd; -+ vdev->index = i; -+ i++; -+ /* Get Device inofrmation */ -+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) { -+ RTE_LOG(ERR, EAL, "VFIO_DEVICE_FSL_MC_GET_INFO failed\n"); -+ goto FAILURE; -+ } -+ -+ if (!strcmp(object_type, "dpni") || -+ !strcmp(object_type, "dpseci")) { -+ struct rte_pci_device *dev; -+ -+ dev = malloc(sizeof(struct rte_pci_device)); -+ if (dev == NULL) { -+ return -1; -+ } -+ memset(dev, 0, sizeof(*dev)); -+ /* store hw_id of dpni/dpseci device */ -+ dev->addr.devid = object_id; -+ dev->id.vendor_id = FSL_VENDOR_ID; -+ dev->id.device_id = (strcmp(object_type, "dpseci"))? 
-+ FSL_MC_DPNI_DEVID: FSL_MC_DPSECI_DEVID; -+ -+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next); -+ } -+ -+ if (!strcmp(object_type, "dpio")) { -+ dpaa2_create_dpio_device(vdev, &device_info, object_id); -+ } -+ -+ if (!strcmp(object_type, "dpbp")) { -+ dpaa2_create_dpbp_device(object_id); -+ } -+ } -+ closedir(d); -+ -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) -+ RTE_LOG(ERR, EAL, "%s(): Err in affining qbman swp\n", __func__); -+ -+ return 0; -+ -+FAILURE: -+ free(group->vfio_device); -+ group->vfio_device = NULL; -+ return -1; -+} -+ -+/* -+ * Scan the content of the PCI bus, and the devices in the devices -+ * list -+ */ -+static int -+fsl_mc_scan(void) -+{ -+ char path[PATH_MAX]; -+ struct stat st; -+ -+ ls2bus_container = getenv("DPRC"); -+ -+ if (ls2bus_container == NULL) { -+ RTE_LOG(WARNING, EAL, "vfio container not set in env DPRC\n"); -+ return -1; -+ } -+ -+ snprintf(path, sizeof(path), "%s/%s", SYSFS_FSL_MC_DEVICES, -+ ls2bus_container); -+ /* Check whether LS-Container exists or not */ -+ RTE_LOG(INFO, EAL, "\tcontainer device path = %s\n", path); -+ if (stat(path, &st) < 0) { -+ RTE_LOG(ERR, EAL, "vfio:fsl-mc device does not exists\n"); -+ return -1; -+ } -+ return 0; -+} -+ -+/* Init the FSL-MC- LS2 EAL subsystem */ -+int -+rte_eal_dpaa2_init(void) -+{ -+ if (fsl_mc_scan() < 0) -+ return -1; -+ -+#ifdef VFIO_PRESENT -+ if (setup_vfio_grp(ls2bus_container)) { -+ RTE_LOG(ERR, EAL, "setup_vfio_grp\n"); -+ return -1; -+ } -+ if (vfio_process_group_devices()) { -+ RTE_LOG(ERR, EAL, "vfio_process_group_devices\n"); -+ return -1; -+ } -+#endif -+ return 0; -+} -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h -new file mode 100644 -index 0000000..7fc5ec6 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h -@@ -0,0 +1,102 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef _EAL_VFIO_FSL_MC_H_ -+#define _EAL_VFIO_FSL_MC_H_ -+ -+#include -+#include -+#include -+#include "eal_vfio.h" -+ -+#define FSL_VENDOR_ID 0x1957 -+#define FSL_MC_DPNI_DEVID 7 -+#define FSL_MC_DPSECI_DEVID 3 -+ -+#define VFIO_MAX_GRP 1 -+#define VFIO_MAX_CONTAINERS 1 -+ -+#define DPAA2_MBUF_HW_ANNOTATION 64 -+#define DPAA2_FD_PTA_SIZE 64 -+#define DPAA2_PACKET_LAYOUT_ALIGN 256 -+#if (RTE_CACHE_LINE_SIZE == 128) -+#define DPAA2_RES 128 -+#else -+#define DPAA2_RES 0 -+#endif -+ -+#define DPAA2_ALIGN_ROUNDUP(x, align) ((align) * (((x) + align - 1) / (align))) -+#define DPAA2_ALIGN_ROUNDUP_PTR(x, align)\ -+ ((void *)DPAA2_ALIGN_ROUNDUP((uintptr_t)(x), (uintptr_t)(align))) -+ -+typedef struct vfio_device { -+ int fd; /* fsl_mc root container device ?? */ -+ int index; /*index of child object */ -+ struct vfio_device *child; /* Child object */ -+} vfio_device; -+ -+typedef struct vfio_group { -+ int fd; /* /dev/vfio/"groupid" */ -+ int groupid; -+ struct vfio_container *container; -+ int object_index; -+ struct vfio_device *vfio_device; -+} vfio_group; -+ -+typedef struct vfio_container { -+ int fd; /* /dev/vfio/vfio */ -+ int used; -+ int index; /* index in group list */ -+ struct vfio_group *group_list[VFIO_MAX_GRP]; -+} vfio_container; -+ -+int vfio_dmamap_mem_region( -+ uint64_t vaddr, -+ uint64_t iova, -+ uint64_t size); -+ -+/* initialize the NXP/FSL dpaa2 accelerators */ -+int rte_eal_dpaa2_init(void); -+ -+int dpaa2_create_dpio_device(struct vfio_device *vdev, -+ struct vfio_device_info *obj_info, -+ int object_id); -+ -+int dpaa2_create_dpbp_device(int dpbp_id); -+ -+int dpaa2_affine_qbman_swp(void); -+ -+int dpaa2_affine_qbman_swp_sec(void); -+ -+#endif -+ -diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile -index 8d62b0d..92446d1 100644 ---- a/lib/librte_mbuf/Makefile -+++ b/lib/librte_mbuf/Makefile -@@ -36,6 +36,10 @@ LIB = librte_mbuf.a - - CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 - -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal -+endif -+ - EXPORT_MAP := rte_mbuf_version.map - - LIBABIVER := 2 -diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c -index dc0467c..c4009ee 100644 ---- a/lib/librte_mbuf/rte_mbuf.c -+++ b/lib/librte_mbuf/rte_mbuf.c -@@ -60,6 +60,59 @@ - #include - #include - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ -+int __attribute__((weak)) -+hw_mbuf_create_pool( -+struct rte_mempool __rte_unused *mp) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+ -+int __attribute__((weak)) -+hw_mbuf_init( -+ struct rte_mempool __rte_unused*mp, -+ void __rte_unused *_m) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+ -+int __attribute__((weak)) -+hw_mbuf_alloc( -+ struct rte_mempool __rte_unused *mp, -+ void __rte_unused **obj_p) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+ -+int __attribute__((weak)) -+hw_mbuf_free(void __rte_unused *m) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+ -+int __attribute__((weak)) -+hw_mbuf_alloc_bulk(struct rte_mempool __rte_unused *pool, -+ void __rte_unused **obj_table, -+ unsigned __rte_unused count) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+ -+int __attribute__((weak)) -+hw_mbuf_free_bulk(struct rte_mempool __rte_unused *mp, -+ void __rte_unused * const *obj_table, -+ unsigned __rte_unused n) -+{ -+ RTE_LOG(WARNING, MBUF, "%s/n", __func__); -+ return -1; -+} -+#endif - /* - * ctrlmbuf constructor, given as a callback function to - * 
rte_mempool_create() -@@ -106,6 +159,10 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg) - - mbp_priv = rte_mempool_get_priv(mp); - memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv)); -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) -+ hw_mbuf_create_pool(mp); -+#endif - } - - /* -@@ -122,6 +179,12 @@ rte_pktmbuf_init(struct rte_mempool *mp, - struct rte_mbuf *m = _m; - uint32_t mbuf_size, buf_len, priv_size; - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) { -+ if (hw_mbuf_init(mp, m) == 0) -+ return; -+ } -+#endif - priv_size = rte_pktmbuf_priv_size(mp); - mbuf_size = sizeof(struct rte_mbuf) + priv_size; - buf_len = rte_pktmbuf_data_room_size(mp); -@@ -170,7 +233,11 @@ rte_pktmbuf_pool_create(const char *name, unsigned n, - return rte_mempool_create(name, n, elt_size, - cache_size, sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL, -+#if defined(RTE_LIBRTE_DPAA2_PMD) -+ socket_id, MEMPOOL_F_HW_PKT_POOL); -+#else - socket_id, 0); -+#endif - } - - /* do some sanity checks on a mbuf: panic if it fails */ -diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile -index a6898ef..6116d52 100644 ---- a/lib/librte_mempool/Makefile -+++ b/lib/librte_mempool/Makefile -@@ -36,6 +36,10 @@ LIB = librte_mempool.a - - CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 - -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -+CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal -+endif - EXPORT_MAP := rte_mempool_version.map - - LIBABIVER := 1 -diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c -index f8781e1..ac9595d 100644 ---- a/lib/librte_mempool/rte_mempool.c -+++ b/lib/librte_mempool/rte_mempool.c -@@ -60,6 +60,10 @@ - - #include "rte_mempool.h" - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+#include "eal_vfio_fsl_mc.h" -+#endif -+ - TAILQ_HEAD(rte_mempool_list, rte_tailq_entry); - - static struct rte_tailq_elem rte_mempool_tailq = { -@@ -316,6 +320,12 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, - - /* this is the size of an object, including header and trailer */ - sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size; -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (flags & MEMPOOL_F_HW_PKT_POOL) -+ sz->total_size += DPAA2_ALIGN_ROUNDUP( -+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE, -+ DPAA2_PACKET_LAYOUT_ALIGN); -+#endif - - return sz->total_size; - } -@@ -590,6 +600,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, - mp->cache_size = cache_size; - mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size); - mp->private_data_size = private_data_size; -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ mp->offload_ptr = UINTPTR_MAX; -+#endif - - /* calculate address of the first element for continuous mempool. */ - obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) + -diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h -index 9745bf0..304a434 100644 ---- a/lib/librte_mempool/rte_mempool.h -+++ b/lib/librte_mempool/rte_mempool.h -@@ -215,7 +215,10 @@ struct rte_mempool { - uintptr_t elt_va_end; - /**< Virtual address of the mempool object. */ - phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT]; -+#ifdef RTE_LIBRTE_DPAA2_PMD - /**< Array of physical page addresses for the mempool objects buffer. 
*/ -+ uintptr_t offload_ptr; -+#endif - - } __rte_cache_aligned; - -@@ -223,7 +226,18 @@ struct rte_mempool { - #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/ - #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ - #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ -- -+#ifdef RTE_LIBRTE_DPAA2_PMD -+#define MEMPOOL_F_HW_PKT_POOL 0x0010 /**< HW offload for packet buffer mgmt*/ -+ -+int hw_mbuf_create_pool(struct rte_mempool *mp); -+int hw_mbuf_init(struct rte_mempool *mp, void *_m); -+int hw_mbuf_alloc(struct rte_mempool *mp, void **obj_p); -+int hw_mbuf_free(void *_m); -+int hw_mbuf_alloc_bulk(struct rte_mempool *pool, -+ void **obj_table, unsigned count); -+int hw_mbuf_free_bulk(struct rte_mempool *mp, void * const *obj_table, -+ unsigned n); -+#endif - /** - * @internal When debug is enabled, store some statistics. - * -@@ -877,6 +891,12 @@ static inline void __attribute__((always_inline)) - rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, - unsigned n) - { -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) { -+ if (hw_mbuf_free_bulk(mp, obj_table, n) == 0) -+ return; -+ } -+#endif - __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT)); - } -@@ -1091,6 +1111,14 @@ static inline int __attribute__((always_inline)) - rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) - { - int ret; -+ -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) { -+ ret = hw_mbuf_alloc_bulk(mp, obj_table, n); -+ if (ret > -2) -+ return ret; -+ } -+#endif - ret = __mempool_get_bulk(mp, obj_table, n, - !(mp->flags & MEMPOOL_F_SC_GET)); - if (ret == 0) -diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk -new file mode 100644 -index 0000000..8541633 ---- /dev/null -+++ b/mk/machine/dpaa2/rte.vars.mk -@@ -0,0 +1,60 @@ -+# BSD LICENSE -+# -+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+# -+# Redistribution and use in source and binary forms, with or without -+# modification, are permitted provided that the following conditions -+# are met: -+# -+# * Redistributions of source code must retain the above copyright -+# notice, this list of conditions and the following disclaimer. -+# * Redistributions in binary form must reproduce the above copyright -+# notice, this list of conditions and the following disclaimer in -+# the documentation and/or other materials provided with the -+# distribution. -+# * Neither the name of Freescale Semiconductor nor the names of its -+# contributors may be used to endorse or promote products derived -+# from this software without specific prior written permission. -+# -+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+# -+# machine: -+# -+# - can define ARCH variable (overridden by cmdline value) -+# - can define CROSS variable (overridden by cmdline value) -+# - define MACHINE_CFLAGS variable (overridden by cmdline value) -+# - define MACHINE_LDFLAGS variable (overridden by cmdline value) -+# - define MACHINE_ASFLAGS variable (overridden by cmdline value) -+# - can define CPU_CFLAGS variable (overridden by cmdline value) that -+# overrides the one defined in arch. -+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that -+# overrides the one defined in arch. -+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that -+# overrides the one defined in arch. -+# - may override any previously defined variable -+# -+ -+# ARCH = -+# CROSS = -+# MACHINE_CFLAGS = -+# MACHINE_LDFLAGS = -+# MACHINE_ASFLAGS = -+# CPU_CFLAGS = -+# CPU_LDFLAGS = -+# CPU_ASFLAGS = -+MACHINE_CFLAGS += -march=armv8-a -+ -+ifdef CONFIG_RTE_ARCH_ARM_TUNE -+MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE) -+endif -diff --git a/mk/rte.app.mk b/mk/rte.app.mk -index c66e491..ee25ba3 100644 ---- a/mk/rte.app.mk -+++ b/mk/rte.app.mk -@@ -125,6 +125,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond - - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2 - - ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n) - # plugins (link only if static libraries) --- -2.5.0 - diff --git a/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch b/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch deleted file mode 100644 index 10b66375..00000000 --- a/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 60971e62dcbb50a7ef1c3839e8b33b5aef6a48fe Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Fri, 1 Jul 2016 12:24:45 -0700 -Subject: [PATCH 18/25] enic: fix segfault on Tx path after restarting a device - -If you stop then start a port that had already sent some packets, -there was a segfault due to not resetting the number of completed -sends to zero. 
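The failure described above is a completion-accounting problem: the driver derives the number of completed sends from an index the NIC writes back and from its own last_completed_index. A minimal sketch of that pattern, with illustrative names rather than the actual enic internals, shows why stale state after a stop/start cycle produces a wild count:

    /* Illustrative sketch only (not the enic code): completed-descriptor
     * count derived from a hardware-written completion index and the
     * driver's last_completed_index.  If either value is left over from
     * before the restart, the modular subtraction yields a bogus count
     * and the cleanup loop walks off the end of the ring. */
    static inline uint16_t
    tx_completions_pending(uint16_t hw_completed_idx,
                           uint16_t last_completed_idx,
                           uint16_t ring_size)
    {
            return (uint16_t)((hw_completed_idx - last_completed_idx +
                               ring_size) % ring_size);
    }

The hunk that follows resets wq->last_completed_index and zeroes the completion message area (wq->cqmsg_rz) in vnic_wq_clean(), so both sides of that subtraction start from zero again after a restart.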
- -Fixes: d5d882fe1a11 ("Tx path rewrite to reduce Host CPU overhead") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/base/vnic_wq.c | 2 ++ - drivers/net/enic/base/vnic_wq.h | 1 + - 2 files changed, 3 insertions(+) - -diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c -index ccbbd61..7026bfe 100644 ---- a/drivers/net/enic/base/vnic_wq.c -+++ b/drivers/net/enic/base/vnic_wq.c -@@ -206,6 +206,8 @@ void vnic_wq_clean(struct vnic_wq *wq, - - wq->head_idx = 0; - wq->tail_idx = 0; -+ wq->last_completed_index = 0; -+ *((uint32_t *)wq->cqmsg_rz->addr) = 0; - - iowrite32(0, &wq->ctrl->fetch_index); - iowrite32(0, &wq->ctrl->posted_index); -diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h -index 37c3ff9..faf3bfa 100644 ---- a/drivers/net/enic/base/vnic_wq.h -+++ b/drivers/net/enic/base/vnic_wq.h -@@ -38,6 +38,7 @@ - - #include "vnic_dev.h" - #include "vnic_cq.h" -+#include - - /* Work queue control */ - struct vnic_wq_ctrl { --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch b/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch deleted file mode 100644 index 3e107486..00000000 --- a/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 8d336ba9cbcb4832b992201497afe07afcd4f2e1 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Fri, 1 Jul 2016 12:32:45 -0700 -Subject: [PATCH 19/25] enic: fix Rx queue initialization after restarting a - device - -If you stop then start a port that had already received some packets, -the NIC could fetch discriptors from the wrong location. This could -effectivly reduce the size of the Rx queue by a random amount and -cause packet drop or reduced performance. - -Reset the NIC fetch index to 0 when allocating and posting mbuf -addresses to the NIC. - -Fixes: 947d860c821f ("enic: improve Rx performance") - -Signed-off-by: John Daley -Reviewed-by: Nelson Escobar ---- - drivers/net/enic/enic_main.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index be17707..68532d3 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -346,6 +346,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", - enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); - iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ iowrite32(0, &rq->ctrl->fetch_index); - rte_rmb(); - - // printf("posted %d buffers to %s rq\n", rq->ring.desc_count, --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch b/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch deleted file mode 100644 index 47bcda23..00000000 --- a/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 3f276178609472585a85fe440b549013a64d9327 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Tue, 14 Jun 2016 16:55:34 -0700 -Subject: [PATCH 20/25] net/enic: fix releasing mbufs when tearing down Rx - queue - -When trying to release the mbufs, the function was incorrectly -iterating over the max size configured instead of the actual size -of the ring. 
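Both restart fixes above reduce to the same rule: on restart, derive loop bounds and hardware indices from the queue's actual state instead of stale or oversized configuration values. The standalone sketch below illustrates the buffer-release half of that rule; toy_rq is an invented type and plain malloc/free stand in for mbufs.

#include <stdio.h>
#include <stdlib.h>

#define CONFIGURED_MAX 16   /* what the adapter was configured for */

/* Toy receive queue: the ring may be smaller than the configured maximum. */
struct toy_rq {
    unsigned desc_count;                /* actual ring size */
    void *buf_ring[CONFIGURED_MAX];
};

/* Release buffers using the ring's real size; iterating up to the
 * configured maximum would touch slots that were never allocated. */
static void toy_rq_release(struct toy_rq *rq)
{
    for (unsigned i = 0; i < rq->desc_count; i++) {
        free(rq->buf_ring[i]);
        rq->buf_ring[i] = NULL;
    }
}

int main(void)
{
    struct toy_rq rq = { .desc_count = 4 };
    for (unsigned i = 0; i < rq.desc_count; i++)
        rq.buf_ring[i] = malloc(64);
    toy_rq_release(&rq);
    printf("released %u buffers\n", rq.desc_count);
    return 0;
}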
- -Fixes: 947d860c821f ("enic: improve Rx performance") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/enic_main.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 68532d3..56ec96e 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -91,7 +91,7 @@ static int is_eth_addr_valid(uint8_t *addr) - } - - static void --enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) -+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq) - { - uint16_t i; - -@@ -100,7 +100,7 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) - return; - } - -- for (i = 0; i < enic->config.rq_desc_count; i++) { -+ for (i = 0; i < rq->ring.desc_count; i++) { - if (rq->mbuf_ring[i]) { - rte_pktmbuf_free_seg(rq->mbuf_ring[i]); - rq->mbuf_ring[i] = NULL; --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch b/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch deleted file mode 100644 index 56d2c677..00000000 --- a/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 38e154305ee5fd2ee454c19218ca144ffd1535f1 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Sat, 11 Jun 2016 10:27:04 -0700 -Subject: [PATCH 21/25] net/enic: fix crash when releasing queues - -If device configuration failed due to a lack of resources, such as -if more queues are requested than are available, the queue release -functions are called with NULL pointers which were being dereferenced. - -Skip releasing queues if they are NULL pointers. - -Fixes: fefed3d1e62c ("enic: new driver") - -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 21 ++++++++++++++++----- - 1 file changed, 16 insertions(+), 5 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 56ec96e..4e5594f 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -462,9 +462,15 @@ int enic_alloc_intr_resources(struct enic *enic) - - void enic_free_rq(void *rxq) - { -- struct vnic_rq *rq_sop = (struct vnic_rq *)rxq; -- struct enic *enic = vnic_dev_priv(rq_sop->vdev); -- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; -+ struct vnic_rq *rq_sop, *rq_data; -+ struct enic *enic; -+ -+ if (rxq == NULL) -+ return; -+ -+ rq_sop = (struct vnic_rq *)rxq; -+ enic = vnic_dev_priv(rq_sop->vdev); -+ rq_data = &enic->rq[rq_sop->data_queue_idx]; - - enic_rxmbuf_queue_release(enic, rq_sop); - if (rq_data->in_use) -@@ -657,9 +663,14 @@ err_exit: - - void enic_free_wq(void *txq) - { -- struct vnic_wq *wq = (struct vnic_wq *)txq; -- struct enic *enic = vnic_dev_priv(wq->vdev); -+ struct vnic_wq *wq; -+ struct enic *enic; -+ -+ if (txq == NULL) -+ return; - -+ wq = (struct vnic_wq *)txq; -+ enic = vnic_dev_priv(wq->vdev); - rte_memzone_free(wq->cqmsg_rz); - vnic_wq_free(wq); - vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch b/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch deleted file mode 100644 index bf6df811..00000000 --- a/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch +++ /dev/null @@ -1,67 +0,0 @@ -From db0a30a2e61a3bf2f6cb8e74203dab84280b0419 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Sat, 11 
Jun 2016 10:27:05 -0700 -Subject: [PATCH 22/25] net/enic: improve out of resources error handling - -If configuration fails due to lack of resources, be more specific -about which resources are lacking - work queues, read queues or -completion queues. Return -EINVAL instead of -1 if more queeues -are requested than are available. - -Fixes: fefed3d1e62c ("enic: new driver") - -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 30 ++++++++++++++++++++---------- - 1 file changed, 20 insertions(+), 10 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 4e5594f..43e4af1 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -970,22 +970,32 @@ static void enic_dev_deinit(struct enic *enic) - int enic_set_vnic_res(struct enic *enic) - { - struct rte_eth_dev *eth_dev = enic->rte_dev; -+ int rc = 0; - -- if ((enic->rq_count < eth_dev->data->nb_rx_queues) || -- (enic->wq_count < eth_dev->data->nb_tx_queues)) { -- dev_err(dev, "Not enough resources configured, aborting\n"); -- return -1; -+ if (enic->rq_count < eth_dev->data->nb_rx_queues) { -+ dev_err(dev, "Not enough Receive queues. Requested:%u, Configured:%u\n", -+ eth_dev->data->nb_rx_queues, enic->rq_count); -+ rc = -EINVAL; -+ } -+ if (enic->wq_count < eth_dev->data->nb_tx_queues) { -+ dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", -+ eth_dev->data->nb_tx_queues, enic->wq_count); -+ rc = -EINVAL; - } - -- enic->rq_count = eth_dev->data->nb_rx_queues; -- enic->wq_count = eth_dev->data->nb_tx_queues; - if (enic->cq_count < (enic->rq_count + enic->wq_count)) { -- dev_err(dev, "Not enough resources configured, aborting\n"); -- return -1; -+ dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", -+ enic->rq_count + enic->wq_count, enic->cq_count); -+ rc = -EINVAL; - } - -- enic->cq_count = enic->rq_count + enic->wq_count; -- return 0; -+ if (rc == 0) { -+ enic->rq_count = eth_dev->data->nb_rx_queues; -+ enic->wq_count = eth_dev->data->nb_tx_queues; -+ enic->cq_count = enic->rq_count + enic->wq_count; -+ } -+ -+ return rc; - } - - static int enic_dev_init(struct enic *enic) --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch b/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch deleted file mode 100644 index 0cc423ac..00000000 --- a/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch +++ /dev/null @@ -1,238 +0,0 @@ -From 2040a8f4e47d3bc4b7f0f11faa863a4bd8d8891d Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Thu, 23 Jun 2016 16:14:58 -0700 -Subject: [PATCH 23/25] net/enic: fix memory freeing - -enic_alloc_consistent() allocated memory, but enic_free_consistent() -was an empty function, so allocated memory was never freed. - -This commit adds a list and lock to the enic structure to keep track -of the memzones allocated in enic_alloc_consistent(), and -enic_free_consistent() uses that information to properly free memory. 
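The bookkeeping scheme described above is a general pattern: record each allocation in a list under a lock so the matching free can look it up and release it. A self-contained approximation follows, using malloc and a pthread mutex in place of rte_memzone and rte_spinlock; the tracked_* names are invented for this sketch, which mirrors the patch's use of the sys/queue.h LIST macros. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* One bookkeeping entry per live allocation. */
struct tracked_entry {
    void *addr;
    size_t size;
    LIST_ENTRY(tracked_entry) entries;
};

static LIST_HEAD(, tracked_entry) tracked_list = LIST_HEAD_INITIALIZER(tracked_list);
static pthread_mutex_t tracked_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate and remember the allocation so it can be found and freed later. */
static void *tracked_alloc(size_t size)
{
    void *p = malloc(size);
    struct tracked_entry *e = malloc(sizeof(*e));
    if (!p || !e) {
        free(p);
        free(e);
        return NULL;
    }
    e->addr = p;
    e->size = size;
    pthread_mutex_lock(&tracked_lock);
    LIST_INSERT_HEAD(&tracked_list, e, entries);
    pthread_mutex_unlock(&tracked_lock);
    return p;
}

/* Look the pointer up in the list; warn instead of freeing blindly. */
static void tracked_free(void *p)
{
    struct tracked_entry *e;

    pthread_mutex_lock(&tracked_lock);
    LIST_FOREACH(e, &tracked_list, entries)
        if (e->addr == p)
            break;
    if (e == NULL) {
        pthread_mutex_unlock(&tracked_lock);
        fprintf(stderr, "tracked_free: %p not found in list\n", p);
        return;
    }
    LIST_REMOVE(e, entries);
    pthread_mutex_unlock(&tracked_lock);
    free(e->addr);
    free(e);
}

int main(void)
{
    void *a = tracked_alloc(128);
    void *b = tracked_alloc(256);
    tracked_free(a);
    tracked_free(b);
    printf("all tracked allocations released\n");
    return 0;
}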
- -Fixes: fefed3d1e62c ("enic: new driver") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/base/vnic_dev.c | 14 +++++------ - drivers/net/enic/base/vnic_dev.h | 2 +- - drivers/net/enic/enic.h | 11 ++++++++ - drivers/net/enic/enic_main.c | 54 ++++++++++++++++++++++++++++++++++------ - 4 files changed, 65 insertions(+), 16 deletions(-) - -diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c -index e8a5028..fc2e4cc 100644 ---- a/drivers/net/enic/base/vnic_dev.c -+++ b/drivers/net/enic/base/vnic_dev.c -@@ -83,7 +83,7 @@ struct vnic_dev { - struct vnic_intr_coal_timer_info intr_coal_timer_info; - void *(*alloc_consistent)(void *priv, size_t size, - dma_addr_t *dma_handle, u8 *name); -- void (*free_consistent)(struct rte_pci_device *hwdev, -+ void (*free_consistent)(void *priv, - size_t size, void *vaddr, - dma_addr_t dma_handle); - }; -@@ -101,7 +101,7 @@ void *vnic_dev_priv(struct vnic_dev *vdev) - void vnic_register_cbacks(struct vnic_dev *vdev, - void *(*alloc_consistent)(void *priv, size_t size, - dma_addr_t *dma_handle, u8 *name), -- void (*free_consistent)(struct rte_pci_device *hwdev, -+ void (*free_consistent)(void *priv, - size_t size, void *vaddr, - dma_addr_t dma_handle)) - { -@@ -807,7 +807,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) - int vnic_dev_notify_unset(struct vnic_dev *vdev) - { - if (vdev->notify && !vnic_dev_in_reset(vdev)) { -- vdev->free_consistent(vdev->pdev, -+ vdev->free_consistent(vdev->priv, - sizeof(struct vnic_devcmd_notify), - vdev->notify, - vdev->notify_pa); -@@ -924,16 +924,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev) - { - if (vdev) { - if (vdev->notify) -- vdev->free_consistent(vdev->pdev, -+ vdev->free_consistent(vdev->priv, - sizeof(struct vnic_devcmd_notify), - vdev->notify, - vdev->notify_pa); - if (vdev->stats) -- vdev->free_consistent(vdev->pdev, -+ vdev->free_consistent(vdev->priv, - sizeof(struct vnic_stats), - vdev->stats, vdev->stats_pa); - if (vdev->fw_info) -- vdev->free_consistent(vdev->pdev, -+ vdev->free_consistent(vdev->priv, - sizeof(struct vnic_devcmd_fw_info), - vdev->fw_info, vdev->fw_info_pa); - kfree(vdev); -@@ -1041,7 +1041,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, - - ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait); - *entry = (u16)a0; -- vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa); -+ vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa); - } else if (cmd == CLSF_DEL) { - a0 = *entry; - ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); -diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h -index 113d6ac..689442f 100644 ---- a/drivers/net/enic/base/vnic_dev.h -+++ b/drivers/net/enic/base/vnic_dev.h -@@ -102,7 +102,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, - void vnic_register_cbacks(struct vnic_dev *vdev, - void *(*alloc_consistent)(void *priv, size_t size, - dma_addr_t *dma_handle, u8 *name), -- void (*free_consistent)(struct rte_pci_device *hwdev, -+ void (*free_consistent)(void *priv, - size_t size, void *vaddr, - dma_addr_t dma_handle)); - void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index d2de6ee..175adb8 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -46,6 +46,8 @@ - #include "vnic_rss.h" - #include "enic_res.h" - #include "cq_enet_desc.h" -+#include -+#include - - #define DRV_NAME "enic_pmd" - #define 
DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver" -@@ -96,6 +98,11 @@ struct enic_soft_stats { - rte_atomic64_t rx_packet_errors; - }; - -+struct enic_memzone_entry { -+ const struct rte_memzone *rz; -+ LIST_ENTRY(enic_memzone_entry) entries; -+}; -+ - /* Per-instance private data structure */ - struct enic { - struct enic *next; -@@ -140,6 +147,10 @@ struct enic { - unsigned int intr_count; - - struct enic_soft_stats soft_stats; -+ -+ /* linked list storing memory allocations */ -+ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list; -+ rte_spinlock_t memzone_list_lock; - }; - - static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq) -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 43e4af1..0547f3b 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -356,12 +356,14 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - } - - static void * --enic_alloc_consistent(__rte_unused void *priv, size_t size, -+enic_alloc_consistent(void *priv, size_t size, - dma_addr_t *dma_handle, u8 *name) - { - void *vaddr; - const struct rte_memzone *rz; - *dma_handle = 0; -+ struct enic *enic = (struct enic *)priv; -+ struct enic_memzone_entry *mze; - - rz = rte_memzone_reserve_aligned((const char *)name, - size, SOCKET_ID_ANY, 0, ENIC_ALIGN); -@@ -374,16 +376,49 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size, - vaddr = rz->addr; - *dma_handle = (dma_addr_t)rz->phys_addr; - -+ mze = rte_malloc("enic memzone entry", -+ sizeof(struct enic_memzone_entry), 0); -+ -+ if (!mze) { -+ pr_err("%s : Failed to allocate memory for memzone list\n", -+ __func__); -+ rte_memzone_free(rz); -+ } -+ -+ mze->rz = rz; -+ -+ rte_spinlock_lock(&enic->memzone_list_lock); -+ LIST_INSERT_HEAD(&enic->memzone_list, mze, entries); -+ rte_spinlock_unlock(&enic->memzone_list_lock); -+ - return vaddr; - } - - static void --enic_free_consistent(__rte_unused struct rte_pci_device *hwdev, -- __rte_unused size_t size, -- __rte_unused void *vaddr, -- __rte_unused dma_addr_t dma_handle) -+enic_free_consistent(void *priv, -+ __rte_unused size_t size, -+ void *vaddr, -+ dma_addr_t dma_handle) - { -- /* Nothing to be done */ -+ struct enic_memzone_entry *mze; -+ struct enic *enic = (struct enic *)priv; -+ -+ rte_spinlock_lock(&enic->memzone_list_lock); -+ LIST_FOREACH(mze, &enic->memzone_list, entries) { -+ if (mze->rz->addr == vaddr && -+ mze->rz->phys_addr == dma_handle) -+ break; -+ } -+ if (mze == NULL) { -+ rte_spinlock_unlock(&enic->memzone_list_lock); -+ dev_warning(enic, -+ "Tried to free memory, but couldn't find it in the memzone list\n"); -+ return; -+ } -+ LIST_REMOVE(mze, entries); -+ rte_spinlock_unlock(&enic->memzone_list_lock); -+ rte_memzone_free(mze->rz); -+ rte_free(mze); - } - - static void -@@ -840,7 +875,7 @@ static int enic_set_rsskey(struct enic *enic) - rss_key_buf_pa, - sizeof(union vnic_rss_key)); - -- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key), -+ enic_free_consistent(enic, sizeof(union vnic_rss_key), - rss_key_buf_va, rss_key_buf_pa); - - return err; -@@ -867,7 +902,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) - rss_cpu_buf_pa, - sizeof(union vnic_rss_cpu)); - -- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), -+ enic_free_consistent(enic, sizeof(union vnic_rss_cpu), - rss_cpu_buf_va, rss_cpu_buf_pa); - - return err; -@@ -1049,6 +1084,9 @@ int enic_probe(struct enic *enic) - goto err_out; - } - -+ 
LIST_INIT(&enic->memzone_list); -+ rte_spinlock_init(&enic->memzone_list_lock); -+ - vnic_register_cbacks(enic->vdev, - enic_alloc_consistent, - enic_free_consistent); --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch b/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch deleted file mode 100644 index d581702d..00000000 --- a/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 658069b0c5994e260cd7d0a7dfc7f03d78dd4f5a Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Tue, 28 Jun 2016 11:49:11 -0700 -Subject: [PATCH 24/25] net/enic: fix Rx scatter with multiple queues - -The Rx scatter patch failed to make a few changes and resulted in -problems when using multiple receive queues (RQs) in DPDK (ie RSS) -since the wrong adapter resources were being used. - -- get and use the correct completion queue index associated with a - receive queue. -- set the correct receive queue index when using RSS - -Fixes: 856d7ba7ed22 ("net/enic: support scattered Rx") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/enic.h | 6 +++++- - drivers/net/enic/enic_main.c | 10 ++++++---- - 2 files changed, 11 insertions(+), 5 deletions(-) - -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 175adb8..8b0fa05 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -165,7 +165,11 @@ static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned - - static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) - { -- return rq; -+ /* Scatter rx uses two receive queues together with one -+ * completion queue, so the completion queue number is no -+ * longer the same as the rq number. 
-+ */ -+ return rq / 2; - } - - static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 0547f3b..976c9da 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -252,19 +252,20 @@ void enic_init_vnic_resources(struct enic *enic) - vnic_dev_stats_clear(enic->vdev); - - for (index = 0; index < enic->rq_count; index++) { -+ cq_idx = enic_cq_rq(enic, enic_sop_rq(enic, index)); -+ - vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)], -- enic_cq_rq(enic, index), -+ cq_idx, - error_interrupt_enable, - error_interrupt_offset); - - data_rq = &enic->rq[enic_data_rq(enic, index)]; - if (data_rq->in_use) - vnic_rq_init(data_rq, -- enic_cq_rq(enic, index), -+ cq_idx, - error_interrupt_enable, - error_interrupt_offset); - -- cq_idx = enic_cq_rq(enic, index); - vnic_cq_init(&enic->cq[cq_idx], - 0 /* flow_control_enable */, - 1 /* color_enable */, -@@ -896,7 +897,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) - return -ENOMEM; - - for (i = 0; i < (1 << rss_hash_bits); i++) -- (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; -+ (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = -+ enic_sop_rq(enic, i % enic->rq_count); - - err = enic_set_rss_cpu(enic, - rss_cpu_buf_pa, --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch b/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch deleted file mode 100644 index e4e9f430..00000000 --- a/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch +++ /dev/null @@ -1,169 +0,0 @@ -From 3131adb7f4195771bf54b294b2ee496055c3e65d Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Tue, 14 Jun 2016 11:54:01 -0700 -Subject: [PATCH 25/25] enic: fixup of Rx Scatter patch - -A version of the Rx Scatter patch was used by VPP before the -patch was accepted in dpdk.org. This patch contains the change -made to the patch before it was accepted. - -Composed of internal dpdk devel patches: -enic: fixup rq count usage in wake of rx scatter -enic: update checks since RX scatter uses 2 VIC RQs per app RQ. -enic: fix packet type and flags when doing scatter Rx - -fixes: ENIC scatter RX - -Signed-off-by: Nelson Escobar ---- - drivers/net/enic/enic.h | 12 ++++++++++-- - drivers/net/enic/enic_ethdev.c | 7 +++++-- - drivers/net/enic/enic_main.c | 19 +++++++++++-------- - drivers/net/enic/enic_res.c | 5 +++-- - drivers/net/enic/enic_rxtx.c | 7 +++++-- - 5 files changed, 34 insertions(+), 16 deletions(-) - -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 8b0fa05..9cc9f0b 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -55,8 +55,11 @@ - #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc" - - #define ENIC_WQ_MAX 8 --#define ENIC_RQ_MAX 8 --#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) -+/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both -+ * RQs use the same CQ. 
-+ */ -+#define ENIC_RQ_MAX 16 -+#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2)) - #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) - - #define VLAN_ETH_HLEN 18 -@@ -163,6 +166,11 @@ static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned - return rq * 2 + 1; - } - -+static inline unsigned int enic_vnic_rq_count(struct enic *enic) -+{ -+ return (enic->rq_count * 2); -+} -+ - static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) - { - /* Scatter rx uses two receive queues together with one -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 697ff82..e5b84e1 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -269,9 +269,12 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - struct enic *enic = pmd_priv(eth_dev); - - ENICPMD_FUNC_TRACE(); -- if (queue_idx >= ENIC_RQ_MAX) { -+ /* With Rx scatter support, two RQs are now used on VIC per RQ used -+ * by the application. -+ */ -+ if (queue_idx * 2 >= ENIC_RQ_MAX) { - dev_err(enic, -- "Max number of RX queues exceeded. Max is %d\n", -+ "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n", - ENIC_RQ_MAX); - return -EINVAL; - } -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 976c9da..ff94ee2 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -133,7 +133,7 @@ static void enic_log_q_error(struct enic *enic) - error_status); - } - -- for (i = 0; i < enic->rq_count; i++) { -+ for (i = 0; i < enic_vnic_rq_count(enic); i++) { - error_status = vnic_rq_error_status(&enic->rq[i]); - if (error_status) - dev_err(enic, "RQ[%d] error_status %d\n", i, -@@ -486,7 +486,7 @@ int enic_alloc_intr_resources(struct enic *enic) - - dev_info(enic, "vNIC resources used: "\ - "wq %d rq %d cq %d intr %d\n", -- enic->wq_count, enic->rq_count, -+ enic->wq_count, enic_vnic_rq_count(enic), - enic->cq_count, enic->intr_count); - - err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); -@@ -790,10 +790,12 @@ int enic_disable(struct enic *enic) - if (err) - return err; - } -- for (i = 0; i < enic->rq_count; i++) { -- err = vnic_rq_disable(&enic->rq[i]); -- if (err) -- return err; -+ for (i = 0; i < enic_vnic_rq_count(enic); i++) { -+ if (enic->rq[i].in_use) { -+ err = vnic_rq_disable(&enic->rq[i]); -+ if (err) -+ return err; -+ } - } - - vnic_dev_set_reset_flag(enic->vdev, 1); -@@ -802,8 +804,9 @@ int enic_disable(struct enic *enic) - for (i = 0; i < enic->wq_count; i++) - vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); - -- for (i = 0; i < enic->rq_count; i++) -- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); -+ for (i = 0; i < enic_vnic_rq_count(enic); i++) -+ if (enic->rq[i].in_use) -+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); - for (i = 0; i < enic->cq_count; i++) - vnic_cq_clean(&enic->cq[i]); - vnic_intr_clean(&enic->intr); -diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c -index ebe379d..42edd84 100644 ---- a/drivers/net/enic/enic_res.c -+++ b/drivers/net/enic/enic_res.c -@@ -196,8 +196,9 @@ void enic_free_vnic_resources(struct enic *enic) - - for (i = 0; i < enic->wq_count; i++) - vnic_wq_free(&enic->wq[i]); -- for (i = 0; i < enic->rq_count; i++) -- vnic_rq_free(&enic->rq[i]); -+ for (i = 0; i < enic_vnic_rq_count(enic); i++) -+ if (enic->rq[i].in_use) -+ vnic_rq_free(&enic->rq[i]); - for (i = 0; i < enic->cq_count; i++) - vnic_cq_free(&enic->cq[i]); - vnic_intr_free(&enic->intr); -diff --git 
a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 463b954..c68bbfb 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -326,8 +326,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - - /* Fill in the rest of the mbuf */ - seg_length = enic_cq_rx_desc_n_bytes(&cqd); -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -- enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ - if (rq->is_sop) { - first_seg = rxmb; - first_seg->nb_segs = 1; -@@ -350,6 +349,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - continue; - } - -+ /* cq rx flags are only valid if eop bit is set */ -+ first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -+ enic_cq_rx_to_pkt_flags(&cqd, first_seg); -+ - if (unlikely(packet_error)) { - rte_pktmbuf_free(first_seg); - rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch b/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch deleted file mode 100644 index 334e9bed..00000000 --- a/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch +++ /dev/null @@ -1,45 +0,0 @@ -From e5b60cf1199c51ee51c287988bdda3522fee748c Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Thu, 7 Jul 2016 18:10:21 -0700 -Subject: [PATCH 1/2] net/enic: fix setting MAC address when a port is - restarted - -enic_disable() removed the MAC address when a port was shut down but -enic_enable() didn't add the MAC address back when the port was -started again. Move where we set the MAC address for the adapter from -enic_setup_finish() to a enic_enable() so that port restarting works -properly. - -Fixes: fefed3d1e62c ("enic: new driver") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/enic_main.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index d4e43b5..5d47c01 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -468,6 +468,8 @@ int enic_enable(struct enic *enic) - for (index = 0; index < enic->rq_count; index++) - enic_start_rq(enic, index); - -+ vnic_dev_add_addr(enic->vdev, enic->mac_addr); -+ - vnic_dev_enable_wait(enic->vdev); - - /* Register and enable error interrupt */ -@@ -971,8 +973,6 @@ int enic_setup_finish(struct enic *enic) - return -1; - } - -- vnic_dev_add_addr(enic->vdev, enic->mac_addr); -- - /* Default conf */ - vnic_dev_packet_filter(enic->vdev, - 1 /* directed */, --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch b/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch deleted file mode 100644 index 1e58db9a..00000000 --- a/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 620b173ae0f77c1a5af2592a27b5db8a6ce88bb6 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Thu, 7 Jul 2016 18:11:08 -0700 -Subject: [PATCH 2/2] net/enic: fix removing old MAC address when setting new - one - -enic_set_mac_address() meant to remove the old MAC address before -setting the new one, but accidentally tried removing the new MAC -address before setting the new MAC address. 
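Stated outside the driver, the rule behind this one-line fix is simply: delete the address the device currently has programmed, then add and remember the new one. A small hypothetical C illustration follows (toy_* names, not the enic API).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

/* Toy device keeping its currently programmed MAC address. */
struct toy_dev {
    uint8_t mac[MAC_LEN];
};

static void toy_del_addr(struct toy_dev *d, const uint8_t *mac)
{
    (void)d;
    printf("del %02x:...:%02x\n", mac[0], mac[MAC_LEN - 1]);
}

static void toy_add_addr(struct toy_dev *d, const uint8_t *mac)
{
    memcpy(d->mac, mac, MAC_LEN);
    printf("add %02x:...:%02x\n", mac[0], mac[MAC_LEN - 1]);
}

/* Correct order: delete the address the device currently has (d->mac),
 * then program and remember the new one. Deleting new_mac here would
 * leave the old filter behind, which is the bug the patch fixes. */
static void toy_set_mac(struct toy_dev *d, const uint8_t *new_mac)
{
    toy_del_addr(d, d->mac);
    toy_add_addr(d, new_mac);
}

int main(void)
{
    struct toy_dev d = { .mac = { 0x02, 0, 0, 0, 0, 0x01 } };
    uint8_t new_mac[MAC_LEN] = { 0x02, 0, 0, 0, 0, 0x02 };
    toy_set_mac(&d, new_mac);
    return 0;
}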
- -Fixes: fefed3d1e62c ("enic: new driver") - -Signed-off-by: Nelson Escobar -Reviewed-by: John Daley ---- - drivers/net/enic/enic_main.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 5d47c01..d8669cc 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -215,7 +215,7 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) - return; - } - -- err = vnic_dev_del_addr(enic->vdev, mac_addr); -+ err = vnic_dev_del_addr(enic->vdev, enic->mac_addr); - if (err) { - dev_err(enic, "del mac addr failed\n"); - return; --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch deleted file mode 100644 index 5cd32cfe..00000000 --- a/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch +++ /dev/null @@ -1,1184 +0,0 @@ -From e462b3f07bcbd807f7f3c8e6077e886a92f46ff0 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:01 -0700 -Subject: [PATCH 28/29] i40e: Add packet_type metadata in the i40e vPMD - -The ptype is decoded from the rx descriptor and stored -in the packet type field in the mbuf using the same function -as the non-vector driver. - -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx.c | 539 +------------------------------------ - drivers/net/i40e/i40e_rxtx.h | 564 +++++++++++++++++++++++++++++++++++++++ - drivers/net/i40e/i40e_rxtx_vec.c | 16 ++ - 3 files changed, 582 insertions(+), 537 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c -index 4d35d83..511f016 100644 ---- a/drivers/net/i40e/i40e_rxtx.c -+++ b/drivers/net/i40e/i40e_rxtx.c -@@ -189,542 +189,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) - } - #endif - --/* For each value it means, datasheet of hardware can tell more details -- * -- * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
-- */ --static inline uint32_t --i40e_rxd_pkt_type_mapping(uint8_t ptype) --{ -- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -- /* L2 types */ -- /* [0] reserved */ -- [1] = RTE_PTYPE_L2_ETHER, -- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -- /* [3] - [5] reserved */ -- [6] = RTE_PTYPE_L2_ETHER_LLDP, -- /* [7] - [10] reserved */ -- [11] = RTE_PTYPE_L2_ETHER_ARP, -- /* [12] - [21] reserved */ -- -- /* Non tunneled IPv4 */ -- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [25] reserved */ -- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv4 --> IPv4 */ -- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [32] reserved */ -- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> IPv6 */ -- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [39] reserved */ -- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN */ -- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT 
| -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [47] reserved */ -- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [54] reserved */ -- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [62] reserved */ -- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [69] reserved */ -- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [77] reserved */ -- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [84] reserved */ -- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* Non tunneled IPv6 */ -- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- 
[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [91] reserved */ -- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv6 --> IPv4 */ -- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [98] reserved */ -- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> IPv6 */ -- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [105] reserved */ -- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN */ -- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [113] reserved */ -- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> 
GRE/Teredo/VXLAN --> IPv6 */ -- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [120] reserved */ -- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [128] reserved */ -- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [135] reserved */ -- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [143] reserved */ -- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [150] reserved */ -- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* All others reserved */ -- }; -- -- return type_table[ptype]; --} -- - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 -@@ -2135,7 +1599,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) - #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC - dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || - #endif -- dev->rx_pkt_burst == i40e_recv_scattered_pkts) -+ dev->rx_pkt_burst == i40e_recv_scattered_pkts || -+ dev->rx_pkt_burst == i40e_recv_pkts_vec) - return ptypes; - return NULL; - } -diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h -index 98179f0..c33782f 100644 ---- a/drivers/net/i40e/i40e_rxtx.h -+++ b/drivers/net/i40e/i40e_rxtx.h 
-@@ -255,4 +255,568 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, - struct i40e_tx_queue *txq); - void i40e_set_tx_function(struct rte_eth_dev *dev); - -+/* For each value it means, datasheet of hardware can tell more details -+ * -+ * @note: fix i40e_dev_supported_ptypes_get() if any change here. -+ */ -+#define RTE_PTYPE_L2_ETHER_NSH 0x00000005 -+static inline uint32_t -+i40e_rxd_pkt_type_mapping(uint8_t ptype) -+{ -+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -+ /* L2 types */ -+ /* [0] reserved */ -+ [1] = RTE_PTYPE_L2_ETHER, -+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -+ /* [3] - [5] reserved */ -+ [6] = RTE_PTYPE_L2_ETHER_LLDP, -+ /* [7] - [10] reserved */ -+ [11] = RTE_PTYPE_L2_ETHER_ARP, -+ /* [12] - [21] reserved */ -+ -+ /* Non tunneled IPv4 */ -+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [25] reserved */ -+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv4 --> IPv4 */ -+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [32] reserved */ -+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> IPv6 */ -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [39] reserved */ -+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN */ -+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [44] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [47] reserved */ -+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [54] reserved */ -+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [62] reserved */ -+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [69] reserved */ -+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [77] reserved */ -+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [84] reserved */ -+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [87] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* Non tunneled IPv6 */ -+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [91] reserved */ -+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv6 --> IPv4 */ -+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [98] reserved */ -+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> IPv6 */ -+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [105] reserved */ -+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN */ -+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [113] reserved */ -+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ -+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [120] reserved */ -+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [128] reserved */ -+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [135] reserved */ -+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [143] reserved */ -+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [150] reserved */ -+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* L2 NSH packet type */ -+ [154] = RTE_PTYPE_L2_ETHER_NSH, -+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [158] = 
RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* All others reserved */ -+ }; -+ -+ return type_table[ptype]; -+} -+ - #endif /* _I40E_RXTX_H_ */ -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index 047aff5..defa581 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -220,6 +220,21 @@ desc_pktlen_align(__m128i descs[4]) - *((uint16_t *)&descs[3]+7) = vol.e[3]; - } - -+static inline void -+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) -+{ -+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); -+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); -+ -+ ptype0 = _mm_srli_epi64(ptype0, 30); -+ ptype1 = _mm_srli_epi64(ptype1, 30); -+ -+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0)); -+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8)); -+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0)); -+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8)); -+} -+ - /* - * Notice: - * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -@@ -413,6 +428,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, - pkt_mb2); - _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, - pkt_mb1); -+ desc_to_ptype_v(descs, &rx_pkts[pos]); - /* C.4 calc avaialbe number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); - nb_pkts_recd += var; --- -2.7.4 - diff --git a/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch deleted file mode 100644 index 9b0f0648..00000000 --- a/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch +++ /dev/null @@ -1,114 +0,0 @@ -From ff4d874754e5e420671cc78d82829cd7317542ad Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:02 -0700 -Subject: [PATCH 29/29] i40e: Enable bad checksum flags in i40e vPMD - -Decode the checksum flags from the rx descriptor, setting -the appropriate bit in the mbuf ol_flags field when the flag -indicates a bad checksum. 
- -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx_vec.c | 55 +++++++++++++++++++++++----------------- - 1 file changed, 32 insertions(+), 23 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index defa581..09ec6e6 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -138,18 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) - static inline void - desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - { -- __m128i vlan0, vlan1, rss; -- union { -- uint16_t e[4]; -- uint64_t dword; -- } vol; -+ __m128i vlan0, vlan1, rss, l3_l4e; - -- /* mask everything except rss and vlan flags -- *bit2 is for vlan tag, bits 13:12 for rss -- */ -- const __m128i rss_vlan_msk = _mm_set_epi16( -- 0x0000, 0x0000, 0x0000, 0x0000, -- 0x3004, 0x3004, 0x3004, 0x3004); -+ /* mask everything except RSS, flow director and VLAN flags -+ * bit2 is for VLAN tag, bit11 for flow director indication -+ * bit13:12 for RSS indication. -+ */ -+ const __m128i rss_vlan_msk = _mm_set_epi32( -+ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004); - - /* map rss and vlan type to rss hash and vlan flag */ - const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, -@@ -162,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - 0, 0, 0, 0, - PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0); - -- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); -- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); -- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); -+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_IP_CKSUM_BAD, -+ 0); -+ -+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); -+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); -+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); - - vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); - vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); - -- rss = _mm_srli_epi16(vlan1, 12); -+ rss = _mm_srli_epi32(vlan1, 12); - rss = _mm_shuffle_epi8(rss_flags, rss); - -+ l3_l4e = _mm_srli_epi32(vlan1, 22); -+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); -+ - vlan0 = _mm_or_si128(vlan0, rss); -- vol.dword = _mm_cvtsi128_si64(vlan0); -+ vlan0 = _mm_or_si128(vlan0, l3_l4e); - -- rx_pkts[0]->ol_flags = vol.e[0]; -- rx_pkts[1]->ol_flags = vol.e[1]; -- rx_pkts[2]->ol_flags = vol.e[2]; -- rx_pkts[3]->ol_flags = vol.e[3]; -+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0); -+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2); -+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4); -+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6); - } - #else - #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) -@@ -770,7 +779,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE - /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_strip != 0 || -- rxmode->hw_vlan_extend != 0) -+ rxmode->hw_vlan_extend != 0 || -+ rxmode->hw_ip_checksum != 0) - return -1; - #endif - -@@ -781,8 +791,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - /* - no csum error report support - * - no header split support - */ -- if (rxmode->hw_ip_checksum == 1 || -- rxmode->header_split == 1) -+ if (rxmode->header_split == 1) - return -1; - - return 0; --- -2.7.4 - diff 
--git a/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch b/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch deleted file mode 100644 index 6bb043eb..00000000 --- a/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch +++ /dev/null @@ -1,47 +0,0 @@ -From cee88bcfd49cbf142c13ee7b6d2e77166c80bb48 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 19 Jul 2016 13:41:14 -0700 -Subject: [PATCH] net/enic: fix possible Rx corruption - -Initialize the mbuf data offset to RTE_PKTMBUF_HEADROOM as the -enic takes ownership of them. If allocated mbufs had some offset -other than RTE_PKTMBUF_HEADROOM, the application would read mbuf -data starting at the wrong place and misinterpret the packet. - -Fixes: 856d7ba7ed22 ("net/enic: support scattered Rx") - -Reviewed-by: Nelson Escobar -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 1 + - drivers/net/enic/enic_rxtx.c | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 8cedebf..774fcb1 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -328,6 +328,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - return -ENOMEM; - } - -+ mb->data_off = RTE_PKTMBUF_HEADROOM; - dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM); - rq_enet_desc_enc(rqd, dma_addr, - (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index c68bbfb..60f5062 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -317,7 +317,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *) &cqd); - - /* Push descriptor for newly allocated mbuf */ -- -+ nmb->data_off = RTE_PKTMBUF_HEADROOM; - dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM); - rq_enet_desc_enc(rqd_ptr, dma_addr, - (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP --- -2.7.0 - diff --git a/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch b/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch deleted file mode 100644 index 88586ebc..00000000 --- a/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch +++ /dev/null @@ -1,24 +0,0 @@ -commit f0ca43396ebeb85228f9737a52caa68fc06aa9ee -Author: Nelson Escobar -Date: Mon Jun 13 17:24:41 2016 -0700 - - enic: fix bug introduced with scatter rx - - We did not properly set the rq pointers we pass up to dpdk when - rx scatter was introduced. This resulted in segfaults whenever - more than one rq was being used. 
- -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index e5b84e1..66fddb1 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -279,7 +279,8 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - return -EINVAL; - } - -- eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx]; -+ eth_dev->data->rx_queues[queue_idx] = -+ (void *)&enic->rq[enic_sop_rq(enic, queue_idx)]; - - ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc); - if (ret) { diff --git a/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch b/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch deleted file mode 100644 index 09bca06e..00000000 --- a/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 2de9d1629312a32f82c43167467640bc793805a6 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Mon, 12 Oct 2015 14:23:30 +0200 -Subject: [PATCH 1/9] kni: fix igb build with kernel 4.2 - -Kernel 4.2 has introduced two new parameters in ndo_bridge_getlink, -which breaks DPDK compilation. - -Linux: 7d4f8d87 ("switchdev: ad VLAN support for ports bridge-getlink") - -This patch adds the necessary checks to fix it. - -Signed-off-by: Pablo de Lara ---- - lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c | 13 +++++++++---- - lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h | 7 ++++++- - 2 files changed, 15 insertions(+), 5 deletions(-) - -diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c -index eed8df6..b330b20 100644 ---- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c -+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c -@@ -2250,14 +2250,14 @@ static int igb_ndo_bridge_setlink(struct net_device *dev, - } - - #ifdef HAVE_BRIDGE_FILTER --#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK -+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS - static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask, - int nlflags) - #else - static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask) --#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK */ -+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ - #else - static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev) -@@ -2275,11 +2275,16 @@ static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - mode = BRIDGE_MODE_VEPA; - - #ifdef HAVE_NDO_FDB_ADD_VID --#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK -+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -+#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL -+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, -+ nlflags, filter_mask, NULL); -+#else - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); -+#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL */ - #else - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); --#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK */ -+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ - #else - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); - #endif /* HAVE_NDO_FDB_ADD_VID */ -diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h -index 852f80f..5f45b8b 100644 ---- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h -+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h -@@ -3899,6 +3899,11 @@ skb_set_hash(struct 
sk_buff *skb, __u32 hash, __always_unused int type) - - #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) ) - /* ndo_bridge_getlink adds new nlflags parameter */ --#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK -+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS - #endif /* >= 4.1.0 */ -+ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) ) -+/* ndo_bridge_getlink adds new filter_mask and vlan_fill parameters */ -+#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL -+#endif /* >= 4.2.0 */ - #endif /* _KCOMPAT_H_ */ --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch deleted file mode 100644 index 2ce5004a..00000000 --- a/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 3609c4fb4d07d4285e96187598f54cb21e9e9b08 Mon Sep 17 00:00:00 2001 -From: Shesha Sreenivasamurthy -Date: Wed, 2 Sep 2015 08:57:24 -0700 -Subject: [PATCH 2/9] mbuf: rearrange rte_mbuf metadata to suit vpp - -Offload structure in the second cache line, next pointer in the -first cache line. Issue reported to Intel. ---- - .../linuxapp/eal/include/exec-env/rte_kni_common.h | 10 +++++++-- - lib/librte_mbuf/rte_mbuf.h | 25 ++++++++++++++-------- - 2 files changed, 24 insertions(+), 11 deletions(-) - -diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -index e9f38bd..d327f71 100644 ---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -@@ -111,6 +111,10 @@ struct rte_kni_fifo { - * The kernel image of the rte_mbuf struct, with only the relevant fields. - * Padding is necessary to assure the offsets of these fields - */ -+/* -+ * offload in the second cache line, next in the first. Better for vpp -+ * at least as of right now. -+ */ - struct rte_kni_mbuf { - void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); - char pad0[10]; -@@ -121,16 +125,18 @@ struct rte_kni_mbuf { - char pad2[4]; - uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ - uint16_t data_len; /**< Amount of data in segment buffer. */ -+ char pad3[2]; - #else - char pad2[2]; - uint16_t data_len; /**< Amount of data in segment buffer. */ - uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ -+ char pad3[4]; - #endif -+ void *next; - - /* fields on second cache line */ -- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); -+ char pad4[12] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); - void *pool; -- void *next; - }; - - /* -diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h -index 8c2db1b..61cbbd7 100644 ---- a/lib/librte_mbuf/rte_mbuf.h -+++ b/lib/librte_mbuf/rte_mbuf.h -@@ -743,6 +743,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes - /** - * The generic rte_mbuf, containing a packet mbuf. - */ -+/* -+ * offload in the second cache line, next in the first. Better for vpp -+ * at least as of right now. 
-+ * If you change this structure, you must change the user-mode -+ * version in rte_mbuf.h -+ */ - struct rte_mbuf { - MARKER cacheline0; - -@@ -809,6 +815,16 @@ struct rte_mbuf { - uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */ - uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ - #endif /* RTE_NEXT_ABI */ -+ struct rte_mbuf *next; /**< Next segment of scattered packet. */ -+ -+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ -+#ifdef RTE_NEXT_ABI -+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ -+#endif /* RTE_NEXT_ABI */ -+ -+ /* second cache line - fields only used in slow path or on TX */ -+ MARKER cacheline1 __rte_cache_aligned; -+ - union { - uint32_t rss; /**< RSS hash result if RSS enabled */ - struct { -@@ -828,21 +844,12 @@ struct rte_mbuf { - uint32_t usr; /**< User defined tags. See rte_distributor_process() */ - } hash; /**< hash information */ - -- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ --#ifdef RTE_NEXT_ABI -- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ --#endif /* RTE_NEXT_ABI */ -- -- /* second cache line - fields only used in slow path or on TX */ -- MARKER cacheline1 __rte_cache_aligned; -- - union { - void *userdata; /**< Can be used for external metadata */ - uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ - }; - - struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ -- struct rte_mbuf *next; /**< Next segment of scattered packet. */ - - /* fields to support TX offloads */ - union { --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch deleted file mode 100644 index d1ea27ad..00000000 --- a/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 699252f0b685db4cd298e90f0e1d64e4792356f2 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 21 Oct 2015 14:46:12 +0200 -Subject: [PATCH 3/9] e1000: Set VLAN Rx Offload tag correctly - ---- - drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++ - lib/librte_ether/rte_ether.h | 3 +++ - 2 files changed, 33 insertions(+) - -diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c -index b13930e..7fe76c8 100644 ---- a/drivers/net/e1000/igb_rxtx.c -+++ b/drivers/net/e1000/igb_rxtx.c -@@ -885,6 +885,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ * RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - rxm->ol_flags = pkt_flags; - #ifdef RTE_NEXT_ABI - rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. 
-@@ -1123,6 +1138,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ * RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - first_seg->ol_flags = pkt_flags; - #ifdef RTE_NEXT_ABI - first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. -diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h -index 07c17d7..fd646ec 100644 ---- a/lib/librte_ether/rte_ether.h -+++ b/lib/librte_ether/rte_ether.h -@@ -332,6 +332,9 @@ struct vxlan_hdr { - #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ - #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ - #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ -+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */ -+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */ -+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */ - - #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) - /**< VXLAN tunnel header length. */ --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch deleted file mode 100644 index 75241173..00000000 --- a/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 67d1c25af7fa16df40a8305405066ba6a40ac659 Mon Sep 17 00:00:00 2001 -From: Shesha Sreenivasamurthy -Date: Wed, 2 Sep 2015 08:46:39 -0700 -Subject: [PATCH 4/9] ixgbe: Wait a bit longer for autonegotiation to leave - state 0 - ---- - drivers/net/ixgbe/base/ixgbe_82599.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c -index f0deb59..ae66380 100644 ---- a/drivers/net/ixgbe/base/ixgbe_82599.c -+++ b/drivers/net/ixgbe/base/ixgbe_82599.c -@@ -2442,7 +2442,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, - autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ -- for (i = 0; i < 10; i++) { -+ for (i = 0; i < 50; i++) { - msec_delay(4); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch b/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch deleted file mode 100644 index 245b43c1..00000000 --- a/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 9e28214eb784b9f68af6e0503f8cefe861f13440 Mon Sep 17 00:00:00 2001 -From: Shesha Sreenivasamurthy -Date: Wed, 2 Sep 2015 08:55:43 -0700 -Subject: [PATCH 5/9] eal: Temporarily turn off unthrottled RTE_LOG(...) - -Otherwise, /var/log/syslog eventually fills the disk. 
The error -condition seems only to affect ESXi VM's. It'd be worth suggesting log -throttling to the DPDK community. Much better to avoid making syslog -(...) calls in the first place. ---- - lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -index 3f87875..29a3539 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c -+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -@@ -709,7 +709,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) - * for epoll_wait. - */ - bytes_read = read(events[n].data.fd, &buf, bytes_read); -- if (bytes_read < 0) { -+ if (0 && bytes_read < 0) { - if (errno == EINTR || errno == EWOULDBLOCK) - continue; - --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch b/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch deleted file mode 100644 index 9306f112..00000000 --- a/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 21a9bf50270f71ebda5acb5fc233b8279cec56a7 Mon Sep 17 00:00:00 2001 -From: Shesha Sreenivasamurthy -Date: Wed, 2 Sep 2015 08:48:09 -0700 -Subject: [PATCH 6/9] virtio: Cleanup virtio pmd debug log output, reset - offload field - ---- - drivers/net/virtio/virtio_ethdev.c | 10 +++++----- - drivers/net/virtio/virtio_rxtx.c | 4 +++- - 2 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c -index 465d3cd..6a686fe 100644 ---- a/drivers/net/virtio/virtio_ethdev.c -+++ b/drivers/net/virtio/virtio_ethdev.c -@@ -1521,24 +1521,24 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet - link.link_speed = SPEED_10G; - - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { -- PMD_INIT_LOG(DEBUG, "Get link status from hw"); - vtpci_read_dev_config(hw, - offsetof(struct virtio_net_config, status), - &status, sizeof(status)); - if ((status & VIRTIO_NET_S_LINK_UP) == 0) { - link.link_status = 0; -- PMD_INIT_LOG(DEBUG, "Port %d is down", -- dev->data->port_id); - } else { - link.link_status = 1; -- PMD_INIT_LOG(DEBUG, "Port %d is up", -- dev->data->port_id); - } - } else { - link.link_status = 1; /* Link up */ - } - virtio_dev_atomic_write_link_status(dev, &link); - -+ /* This message is far too noisy for normal use */ -+ if (0) -+ PMD_INIT_LOG(DEBUG, "Port %d is %s\n", dev->data->port_id, -+ link.link_status ? "up" : "down"); -+ - return (old.link_status == link.link_status) ? 
-1 : 0; - } - -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index c5b53bb..9f0b759 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -536,6 +536,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->next = NULL; - rxm->pkt_len = (uint32_t)(len[i] - hdr_size); - rxm->data_len = (uint16_t)(len[i] - hdr_size); -+ rxm->ol_flags = 0; - - if (hw->vlan_strip) - rte_vlan_strip(rxm); -@@ -651,6 +652,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->next = NULL; - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); -+ rxm->ol_flags = 0; - - rxm->port = rxvq->port_id; - rx_pkts[nb_rx] = rxm; -@@ -752,7 +754,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - if (unlikely(nb_pkts < 1)) - return nb_pkts; - -- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); -+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); - - virtio_rmb(); --- -2.5.0 - diff --git a/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch b/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch deleted file mode 100644 index 31e828cf..00000000 --- a/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch +++ /dev/null @@ -1,28 +0,0 @@ -From cbb6efb18835860f06a9e02bf63a9fbc2e19d192 Mon Sep 17 00:00:00 2001 -From: Yoann Desmouceaux -Date: Fri, 19 Feb 2016 10:50:51 +0100 -Subject: [PATCH] enic: fix dma addr of outgoing packets - -The enic PMD driver send function uses a constant offset instead of relying on the data_off in the mbuf to find the start of the packet. - -Signed-off-by: Yoann Desmouceaux ---- - drivers/net/enic/enic_main.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index f47e96c..7bad59c 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -163,7 +163,7 @@ int enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - uint8_t cq_entry = eop; - uint8_t vlan_tag_insert = 0; - uint64_t bus_addr = (dma_addr_t) -- (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM); -+ (tx_pkt->buf_physaddr + tx_pkt->data_off); - - if (sop) { - if (ol_flags & PKT_TX_VLAN_PKT) --- -2.1.4 - diff --git a/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch deleted file mode 100644 index 6ed2fc61..00000000 --- a/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 4a599535445d16a1c55fac0bd71edc443c6c23f2 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:21:21 +0100 -Subject: [PATCH 1/4] e1000: Set VLAN Rx Offload tag correctly - ---- - drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++ - lib/librte_ether/rte_ether.h | 3 +++ - 2 files changed, 33 insertions(+) - -diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c -index 996e7da..cbe80a1 100644 ---- a/drivers/net/e1000/igb_rxtx.c -+++ b/drivers/net/e1000/igb_rxtx.c -@@ -910,6 +910,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ 
* RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - rxm->ol_flags = pkt_flags; - rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. - lo_dword.hs_rss.pkt_info); -@@ -1146,6 +1161,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); -+ { -+ /* -+ * Check packet for VLAN ethernet types and set -+ * RX Offload flag PKT_RX_VLAN_PKT accordingly. -+ */ -+ struct ether_hdr *eth_hdr = -+ rte_pktmbuf_mtod(rxm, struct ether_hdr *); -+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); -+ -+ if ((eth_type == ETHER_TYPE_VLAN) || -+ (eth_type == ETHER_TYPE_VLAN_AD) || -+ (eth_type == ETHER_TYPE_VLAN_9100) || -+ (eth_type == ETHER_TYPE_VLAN_9200)) -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ } - first_seg->ol_flags = pkt_flags; - first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. - lower.lo_dword.hs_rss.pkt_info); -diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h -index 07c17d7..fd646ec 100644 ---- a/lib/librte_ether/rte_ether.h -+++ b/lib/librte_ether/rte_ether.h -@@ -332,6 +332,9 @@ struct vxlan_hdr { - #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ - #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ - #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ -+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */ -+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */ -+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */ - - #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) - /**< VXLAN tunnel header length. 
*/ --- -2.5.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch deleted file mode 100644 index b7a50298..00000000 --- a/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 009cd67e5b1ed0592de0fb6ae2fa662ffc172dde Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:22:11 +0100 -Subject: [PATCH 2/4] ixgbe: Wait a bit longer for autonegotiation to leave - ---- - drivers/net/ixgbe/base/ixgbe_82599.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c -index f0deb59..ae66380 100644 ---- a/drivers/net/ixgbe/base/ixgbe_82599.c -+++ b/drivers/net/ixgbe/base/ixgbe_82599.c -@@ -2442,7 +2442,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, - autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); - /* Wait for AN to leave state 0 */ -- for (i = 0; i < 10; i++) { -+ for (i = 0; i < 50; i++) { - msec_delay(4); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) --- -2.5.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch deleted file mode 100644 index 874f666b..00000000 --- a/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch +++ /dev/null @@ -1,76 +0,0 @@ -From e2592eb622c33791d8ae51153360bd8249bdd056 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 03:29:22 +0100 -Subject: [PATCH 3/4] virtio: Cleanup virtio pmd debug log output, reset - ---- - drivers/net/virtio/virtio_ethdev.c | 10 +++++----- - drivers/net/virtio/virtio_rxtx.c | 4 +++- - 2 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c -index d928339..2fa1587 100644 ---- a/drivers/net/virtio/virtio_ethdev.c -+++ b/drivers/net/virtio/virtio_ethdev.c -@@ -1635,24 +1635,24 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet - link.link_speed = SPEED_10G; - - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { -- PMD_INIT_LOG(DEBUG, "Get link status from hw"); - vtpci_read_dev_config(hw, - offsetof(struct virtio_net_config, status), - &status, sizeof(status)); - if ((status & VIRTIO_NET_S_LINK_UP) == 0) { - link.link_status = 0; -- PMD_INIT_LOG(DEBUG, "Port %d is down", -- dev->data->port_id); - } else { - link.link_status = 1; -- PMD_INIT_LOG(DEBUG, "Port %d is up", -- dev->data->port_id); - } - } else { - link.link_status = 1; /* Link up */ - } - virtio_dev_atomic_write_link_status(dev, &link); - -+ /* This message is far too noisy for normal use */ -+ if (0) -+ PMD_INIT_LOG(DEBUG, "Port %d is %s\n", dev->data->port_id, -+ link.link_status ? "up" : "down"); -+ - return (old.link_status == link.link_status) ? 
-1 : 0; - } - -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index 74b39ef..2512bce 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -618,6 +618,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->next = NULL; - rxm->pkt_len = (uint32_t)(len[i] - hdr_size); - rxm->data_len = (uint16_t)(len[i] - hdr_size); -+ rxm->ol_flags = 0; - - if (hw->vlan_strip) - rte_vlan_strip(rxm); -@@ -737,6 +738,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->vlan_tci = 0; - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); -+ rxm->ol_flags = 0; - - rxm->port = rxvq->port_id; - rx_pkts[nb_rx] = rxm; -@@ -838,7 +840,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - if (unlikely(nb_pkts < 1)) - return nb_pkts; - -- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); -+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); - - virtio_rmb(); --- -2.5.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch deleted file mode 100644 index bee64dff..00000000 --- a/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch +++ /dev/null @@ -1,83 +0,0 @@ -From b8b575a3398c480f6e02525a0933e5e057139b78 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 04:25:23 +0100 -Subject: [PATCH 4/4] mbuf: rearrange rte_mbuf metadata to suit vpp - ---- - .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++-- - lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++-------- - 2 files changed, 15 insertions(+), 10 deletions(-) - -diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -index bd1cc09..a68a949 100644 ---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h -@@ -120,11 +120,12 @@ struct rte_kni_mbuf { - char pad2[4]; - uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ - uint16_t data_len; /**< Amount of data in segment buffer. */ -+ char pad3[8]; -+ void *next; - - /* fields on second cache line */ -- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); -+ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE))); - void *pool; -- void *next; - }; - - /* -diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h -index f234ac9..a1b4405 100644 ---- a/lib/librte_mbuf/rte_mbuf.h -+++ b/lib/librte_mbuf/rte_mbuf.h -@@ -734,6 +734,12 @@ struct rte_mbuf_offload; - /** - * The generic rte_mbuf, containing a packet mbuf. - */ -+/* -+ * offload in the second cache line, next in the first. Better for vpp -+ * at least as of right now. -+ * If you change this structure, you must change the user-mode -+ * version in rte_mbuf.h -+ */ - struct rte_mbuf { - MARKER cacheline0; - -@@ -786,6 +792,12 @@ struct rte_mbuf { - uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ - uint16_t data_len; /**< Amount of data in segment buffer. */ - uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */ -+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ -+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ -+ struct rte_mbuf *next; /**< Next segment of scattered packet. 
*/ -+ -+ /* second cache line - fields only used in slow path or on TX */ -+ MARKER cacheline1 __rte_cache_aligned; - - union { - uint32_t rss; /**< RSS hash result if RSS enabled */ -@@ -809,20 +821,12 @@ struct rte_mbuf { - uint32_t usr; /**< User defined tags. See rte_distributor_process() */ - } hash; /**< hash information */ - -- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */ -- -- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */ -- -- /* second cache line - fields only used in slow path or on TX */ -- MARKER cacheline1 __rte_cache_aligned; -- - union { - void *userdata; /**< Can be used for external metadata */ - uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ - }; - - struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ -- struct rte_mbuf *next; /**< Next segment of scattered packet. */ - - /* fields to support TX offloads */ - union { --- -2.5.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch b/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch deleted file mode 100644 index c6211cd4..00000000 --- a/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch +++ /dev/null @@ -1,24 +0,0 @@ -From a8767269f3ee545141e83e5a5f62ff24c29248a9 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Wed, 16 Dec 2015 04:43:40 +0100 -Subject: [PATCH 5/5] missing include - ---- - lib/librte_eal/linuxapp/eal/eal_timer.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c -index 9ceff33..d0792be 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_timer.c -+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c -@@ -51,6 +51,7 @@ - #include - #include - #include -+#include - - #include "eal_private.h" - #include "eal_internal_cfg.h" --- -2.5.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch b/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch deleted file mode 100644 index f15e4aab..00000000 --- a/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 68d23609ec0c42773043383ff2939a30830e8069 Mon Sep 17 00:00:00 2001 -From: Bud Grise -Date: Tue, 2 Feb 2016 12:45:44 -0800 -Subject: [PATCH 6/8] Fix a crash in igb_uio driver when the device is removed. - -This crash happens because the device still has MSI configured, -the fix is to free the IRQ. 
- -Signed-off-by: Todd Foggoa (tfoggoa) ---- - lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -index f5617d2..23a5cfa 100644 ---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -@@ -571,6 +571,12 @@ igbuio_pci_remove(struct pci_dev *dev) - udev = info->priv; - - sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); -+ -+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)){ -+ free_irq(info->irq, info->uio_dev); -+ info->irq = UIO_IRQ_NONE; -+ } -+ - uio_unregister_device(info); - igbuio_pci_release_iomem(info); - if (udev->mode == RTE_INTR_MODE_MSIX) --- -2.2.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch deleted file mode 100644 index 4a1494ef..00000000 --- a/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 5d03f3ca8ddc7313de59e54d83912b1f3c049170 Mon Sep 17 00:00:00 2001 -From: "Todd Foggoa (tfoggoa)" -Date: Wed, 3 Feb 2016 08:35:27 -0800 -Subject: [PATCH 7/8] Allow applications to override rte_delay_us() - -Some applications may wish to define their own implentation of -usec delay other than the existing blocking one. The default -behavior remains unchanged. - -Signed-off-by: Todd Foggoa (tfoggoa) ---- - lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c -index 72371b8..5189fa5 100644 ---- a/lib/librte_eal/common/eal_common_timer.c -+++ b/lib/librte_eal/common/eal_common_timer.c -@@ -47,9 +47,21 @@ - /* The frequency of the RDTSC timer resolution */ - static uint64_t eal_tsc_resolution_hz; - -+/* Allow an override of the rte_delay_us function */ -+int rte_delay_us_override (unsigned us) __attribute__((weak)); -+ -+int -+rte_delay_us_override(__attribute__((unused)) unsigned us) -+{ -+ return 0; -+} -+ - void - rte_delay_us(unsigned us) - { -+ if (rte_delay_us_override(us)) -+ return; -+ - const uint64_t start = rte_get_timer_cycles(); - const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6; - while ((rte_get_timer_cycles() - start) < ticks) --- -2.2.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch b/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch deleted file mode 100644 index 04c1fb39..00000000 --- a/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 666ceb1d0c11e2ca69baacc272b7d5690d3f11a3 Mon Sep 17 00:00:00 2001 -From: Bud Grise -Date: Mon, 1 Feb 2016 14:28:01 -0500 -Subject: [PATCH 8/8] Add missing init of packet_type field. - -This can cause packets to be mishandled in systems with more than -one type of driver in use. 
- -Signed-off-by: Todd Foggoa (tfoggoa) ---- - drivers/net/e1000/em_rxtx.c | 2 ++ - drivers/net/virtio/virtio_rxtx.c | 2 ++ - drivers/net/vmxnet3/vmxnet3_rxtx.c | 1 + - 3 files changed, 5 insertions(+) - -diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c -index d8fb252..8796c8a 100644 ---- a/drivers/net/e1000/em_rxtx.c -+++ b/drivers/net/e1000/em_rxtx.c -@@ -799,6 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxm->ol_flags = rx_desc_status_to_pkt_flags(status); - rxm->ol_flags = rxm->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -@@ -1025,6 +1026,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); - first_seg->ol_flags = first_seg->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ first_seg->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index 2512bce..a74c816 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -619,6 +619,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->pkt_len = (uint32_t)(len[i] - hdr_size); - rxm->data_len = (uint16_t)(len[i] - hdr_size); - rxm->ol_flags = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - if (hw->vlan_strip) - rte_vlan_strip(rxm); -@@ -739,6 +740,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); - rxm->ol_flags = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - rxm->port = rxvq->port_id; - rx_pkts[nb_rx] = rxm; -diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c -index 4de5d89..c76b230 100644 ---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c -+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c -@@ -640,6 +640,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxm->ol_flags = 0; - rxm->vlan_tci = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - vmxnet3_rx_offload(rcd, rxm); - --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch deleted file mode 100644 index 62132b13..00000000 --- a/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch +++ /dev/null @@ -1,26 +0,0 @@ -From da141a8f16224a97a1a4093a3293f9bb6b15fa90 Mon Sep 17 00:00:00 2001 -From: Dave Barach -Date: Tue, 9 Feb 2016 10:22:39 -0500 -Subject: [PATCH] Temporarily disable unthrottled log message. 
- -Signed-off-by: Dave Barach ---- - lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -index 06b26a9..8d918a4 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c -+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -@@ -711,6 +711,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) - if (errno == EINTR || errno == EWOULDBLOCK) - continue; - -+ /* $$$ disable to avoid filling /var/log */ -+ if (0) - RTE_LOG(ERR, EAL, "Error reading from file " - "descriptor %d: %s\n", - events[n].data.fd, --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch b/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch deleted file mode 100644 index a524007c..00000000 --- a/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch +++ /dev/null @@ -1,28 +0,0 @@ -From c68ded695938b43682d4bd7dfaf40e5b267dfe3b Mon Sep 17 00:00:00 2001 -From: Yoann Desmouceaux -Date: Fri, 19 Feb 2016 12:49:29 +0100 -Subject: [PATCH] enic: fix dma addr of outgoing packets - -The enic PMD driver send function uses a constant offset instead of relying on the data_off in the mbuf to find the start of the packet. - -Signed-off-by: Yoann Desmouceaux ---- - drivers/net/enic/enic_main.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 07a9810..f818c32 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -166,7 +166,7 @@ void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - uint16_t mss = 0; - uint8_t vlan_tag_insert = 0; - uint64_t bus_addr = (dma_addr_t) -- (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM); -+ (tx_pkt->buf_physaddr + tx_pkt->data_off); - - if (sop) { - if (ol_flags & PKT_TX_VLAN_PKT) --- -2.1.4 - diff --git a/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch b/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch deleted file mode 100644 index 2aa4840d..00000000 --- a/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch +++ /dev/null @@ -1,1349 +0,0 @@ -From 057358356e7d05f07ab2df37c12b1cce37a3cca9 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Fri, 4 Mar 2016 13:09:00 -0800 -Subject: [PATCH 11/22] enic: improve Rx performance - - This is a wholesale replacement of the Enic PMD receive path in order - to improve performance and code clarity. The changes are: - - Simplify and reduce code path length of receive function. - - Put most of the fast-path receive functions in one file. - - Reduce the number of posted_index updates (pay attention to - rx_free_thresh) - - Remove the unneeded container structure around the RQ mbuf ring - - Prefetch next Mbuf and descriptors while processing the current one - - Use a lookup table for converting CQ flags to mbuf flags. 
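The last item, translating completion-queue flags through a lookup table, replaces the chain of ipv4/ipv6/tcp/udp conditionals in the old receive path with a single masked table load. A generic sketch of the technique follows; the HW_FLAG_* bit positions and table contents here are illustrative only, the real table over the CQ descriptor flags appears in enic_rx.c further down in this patch.

#include <rte_mbuf.h>

#define HW_FLAG_UDP   (1u << 1) /* illustrative bit positions, not the real CQ layout */
#define HW_FLAG_TCP   (1u << 2)
#define HW_FLAG_IPV6  (1u << 4)
#define HW_FLAG_IPV4  (1u << 5)

static const uint32_t pkt_type_table[64] = {
	[HW_FLAG_IPV4]               = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[HW_FLAG_IPV4 | HW_FLAG_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[HW_FLAG_IPV4 | HW_FLAG_TCP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[HW_FLAG_IPV6]               = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[HW_FLAG_IPV6 | HW_FLAG_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[HW_FLAG_IPV6 | HW_FLAG_TCP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	/* every other combination stays 0, i.e. RTE_PTYPE_UNKNOWN */
};

static inline uint32_t
example_flags_to_pkt_type(uint32_t hw_flags)
{
	return pkt_type_table[hw_flags & 0x3f]; /* one masked load, no branches */
}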
- - Signed-off-by: John Daley ---- - drivers/net/enic/Makefile | 1 + - drivers/net/enic/base/vnic_rq.c | 99 ++--------- - drivers/net/enic/base/vnic_rq.h | 147 +--------------- - drivers/net/enic/enic.h | 16 +- - drivers/net/enic/enic_ethdev.c | 27 ++- - drivers/net/enic/enic_main.c | 321 ++++++++++------------------------ - drivers/net/enic/enic_res.h | 16 +- - drivers/net/enic/enic_rx.c | 370 ++++++++++++++++++++++++++++++++++++++++ - 8 files changed, 511 insertions(+), 486 deletions(-) - create mode 100644 drivers/net/enic/enic_rx.c - -diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile -index f0ee093..f316274 100644 ---- a/drivers/net/enic/Makefile -+++ b/drivers/net/enic/Makefile -@@ -53,6 +53,7 @@ VPATH += $(SRCDIR)/src - # - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c -+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c -diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c -index 1441604..cb62c5e 100644 ---- a/drivers/net/enic/base/vnic_rq.c -+++ b/drivers/net/enic/base/vnic_rq.c -@@ -35,77 +35,21 @@ - #include "vnic_dev.h" - #include "vnic_rq.h" - --static int vnic_rq_alloc_bufs(struct vnic_rq *rq) --{ -- struct vnic_rq_buf *buf; -- unsigned int i, j, count = rq->ring.desc_count; -- unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); -- -- for (i = 0; i < blks; i++) { -- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); -- if (!rq->bufs[i]) -- return -ENOMEM; -- } -- -- for (i = 0; i < blks; i++) { -- buf = rq->bufs[i]; -- for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) { -- buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j; -- buf->desc = (u8 *)rq->ring.descs + -- rq->ring.desc_size * buf->index; -- if (buf->index + 1 == count) { -- buf->next = rq->bufs[0]; -- break; -- } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) { -- buf->next = rq->bufs[i + 1]; -- } else { -- buf->next = buf + 1; -- buf++; -- } -- } -- } -- -- rq->to_use = rq->to_clean = rq->bufs[0]; -- -- return 0; --} -- --int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count, -- unsigned int desc_size) --{ -- int mem_size = 0; -- -- mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size); -- -- mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) * -- VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count); -- -- return mem_size; --} -- - void vnic_rq_free(struct vnic_rq *rq) - { - struct vnic_dev *vdev; -- unsigned int i; - - vdev = rq->vdev; - - vnic_dev_free_desc_ring(vdev, &rq->ring); - -- for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { -- if (rq->bufs[i]) { -- kfree(rq->bufs[i]); -- rq->bufs[i] = NULL; -- } -- } -- - rq->ctrl = NULL; - } - - int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, - unsigned int desc_count, unsigned int desc_size) - { -- int err; -+ int rc; - char res_name[NAME_MAX]; - static int instance; - -@@ -121,18 +65,9 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, - vnic_rq_disable(rq); - - snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index); -- err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size, -+ rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size, - rq->socket_id, res_name); -- if (err) -- return err; -- -- err = vnic_rq_alloc_bufs(rq); -- if (err) { -- vnic_rq_free(rq); -- return err; -- } -- 
-- return 0; -+ return rc; - } - - void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, -@@ -154,9 +89,6 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, - iowrite32(fetch_index, &rq->ctrl->fetch_index); - iowrite32(posted_index, &rq->ctrl->posted_index); - -- rq->to_use = rq->to_clean = -- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] -- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; - } - - void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, -@@ -176,6 +108,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, - fetch_index, fetch_index, - error_interrupt_enable, - error_interrupt_offset); -+ rq->rxst_idx = 0; -+ rq->tot_pkts = 0; - } - - void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error) -@@ -212,21 +146,20 @@ int vnic_rq_disable(struct vnic_rq *rq) - } - - void vnic_rq_clean(struct vnic_rq *rq, -- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) -+ void (*buf_clean)(struct rte_mbuf **buf)) - { -- struct vnic_rq_buf *buf; -- u32 fetch_index; -+ struct rte_mbuf **buf; -+ u32 fetch_index, i; - unsigned int count = rq->ring.desc_count; - -- buf = rq->to_clean; -- -- while (vnic_rq_desc_used(rq) > 0) { -+ buf = &rq->mbuf_ring[0]; - -- (*buf_clean)(rq, buf); -- -- buf = rq->to_clean = buf->next; -- rq->ring.desc_avail++; -+ for (i = 0; i < count; i++) { -+ (*buf_clean)(buf); -+ buf++; - } -+ rq->ring.desc_avail = count - 1; -+ rq->rx_nb_hold = 0; - - /* Use current fetch_index as the ring starting point */ - fetch_index = ioread32(&rq->ctrl->fetch_index); -@@ -235,9 +168,7 @@ void vnic_rq_clean(struct vnic_rq *rq, - /* Hardware surprise removal: reset fetch_index */ - fetch_index = 0; - } -- rq->to_use = rq->to_clean = -- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] -- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; -+ - iowrite32(fetch_index, &rq->ctrl->posted_index); - - vnic_dev_clear_desc_ring(&rq->ring); -diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h -index 0f5c3c1..e083ccc 100644 ---- a/drivers/net/enic/base/vnic_rq.h -+++ b/drivers/net/enic/base/vnic_rq.h -@@ -66,42 +66,22 @@ struct vnic_rq_ctrl { - u32 pad10; - }; - --/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */ --#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32 --#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64 --#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \ -- ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? 
\ -- VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES)) --#define VNIC_RQ_BUF_BLK_SZ(entries) \ -- (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) --#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ -- DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) --#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) -- --struct vnic_rq_buf { -- struct vnic_rq_buf *next; -- dma_addr_t dma_addr; -- void *os_buf; -- unsigned int os_buf_index; -- unsigned int len; -- unsigned int index; -- void *desc; -- uint64_t wr_id; --}; -- - struct vnic_rq { - unsigned int index; -+ unsigned int posted_index; - struct vnic_dev *vdev; -- struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ -+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ - struct vnic_dev_ring ring; -- struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; -- struct vnic_rq_buf *to_use; -- struct vnic_rq_buf *to_clean; -+ struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ -+ unsigned int mbuf_next_idx; /* next mb to consume */ - void *os_buf_head; - unsigned int pkts_outstanding; -- -+ uint16_t rx_nb_hold; -+ uint16_t rx_free_thresh; - unsigned int socket_id; - struct rte_mempool *mp; -+ uint16_t rxst_idx; -+ uint32_t tot_pkts; - }; - - static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) -@@ -116,119 +96,13 @@ static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) - return rq->ring.desc_count - rq->ring.desc_avail - 1; - } - --static inline void *vnic_rq_next_desc(struct vnic_rq *rq) --{ -- return rq->to_use->desc; --} -- --static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) --{ -- return rq->to_use->index; --} -- --static inline void vnic_rq_post(struct vnic_rq *rq, -- void *os_buf, unsigned int os_buf_index, -- dma_addr_t dma_addr, unsigned int len, -- uint64_t wrid) --{ -- struct vnic_rq_buf *buf = rq->to_use; -- -- buf->os_buf = os_buf; -- buf->os_buf_index = os_buf_index; -- buf->dma_addr = dma_addr; -- buf->len = len; -- buf->wr_id = wrid; -- -- buf = buf->next; -- rq->to_use = buf; -- rq->ring.desc_avail--; -- -- /* Move the posted_index every nth descriptor -- */ -- --#ifndef VNIC_RQ_RETURN_RATE --#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ --#endif -- -- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. -- */ -- wmb(); -- iowrite32(buf->index, &rq->ctrl->posted_index); -- } --} -- --static inline void vnic_rq_post_commit(struct vnic_rq *rq, -- void *os_buf, unsigned int os_buf_index, -- dma_addr_t dma_addr, unsigned int len) --{ -- struct vnic_rq_buf *buf = rq->to_use; -- -- buf->os_buf = os_buf; -- buf->os_buf_index = os_buf_index; -- buf->dma_addr = dma_addr; -- buf->len = len; -- -- buf = buf->next; -- rq->to_use = buf; -- rq->ring.desc_avail--; -- -- /* Move the posted_index every descriptor -- */ -- -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. 
-- */ -- wmb(); -- iowrite32(buf->index, &rq->ctrl->posted_index); --} - --static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) --{ -- rq->ring.desc_avail += count; --} - - enum desc_return_options { - VNIC_RQ_RETURN_DESC, - VNIC_RQ_DEFER_RETURN_DESC, - }; - --static inline int vnic_rq_service(struct vnic_rq *rq, -- struct cq_desc *cq_desc, u16 completed_index, -- int desc_return, int (*buf_service)(struct vnic_rq *rq, -- struct cq_desc *cq_desc, struct vnic_rq_buf *buf, -- int skipped, void *opaque), void *opaque) --{ -- struct vnic_rq_buf *buf; -- int skipped; -- int eop = 0; -- -- buf = rq->to_clean; -- while (1) { -- -- skipped = (buf->index != completed_index); -- -- if ((*buf_service)(rq, cq_desc, buf, skipped, opaque)) -- eop++; -- -- if (desc_return == VNIC_RQ_RETURN_DESC) -- rq->ring.desc_avail++; -- -- rq->to_clean = buf->next; -- -- if (!skipped) -- break; -- -- buf = rq->to_clean; -- } -- return eop; --} -- - static inline int vnic_rq_fill(struct vnic_rq *rq, - int (*buf_fill)(struct vnic_rq *rq)) - { -@@ -274,8 +148,5 @@ unsigned int vnic_rq_error_status(struct vnic_rq *rq); - void vnic_rq_enable(struct vnic_rq *rq); - int vnic_rq_disable(struct vnic_rq *rq); - void vnic_rq_clean(struct vnic_rq *rq, -- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); --int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count, -- unsigned int desc_size); -- -+ void (*buf_clean)(struct rte_mbuf **buf)); - #endif /* _VNIC_RQ_H_ */ -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 9e78305..8c914f5 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -45,6 +45,7 @@ - #include "vnic_nic.h" - #include "vnic_rss.h" - #include "enic_res.h" -+#include "cq_enet_desc.h" - - #define DRV_NAME "enic_pmd" - #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver" -@@ -154,6 +155,16 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) - return (struct enic *)eth_dev->data->dev_private; - } - -+#define RTE_LIBRTE_ENIC_ASSERT_ENABLE -+#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE -+#define ASSERT(x) do { \ -+ if (!(x)) \ -+ rte_panic("ENIC: x"); \ -+} while (0) -+#else -+#define ASSERT(x) -+#endif -+ - extern void enic_fdir_stats_get(struct enic *enic, - struct rte_eth_fdir_stats *stats); - extern int enic_fdir_add_fltr(struct enic *enic, -@@ -193,9 +204,10 @@ extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - uint16_t ol_flags, uint16_t vlan_tag); - - extern void enic_post_wq_index(struct vnic_wq *wq); --extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts, -- unsigned int budget, unsigned int *work_done); - extern int enic_probe(struct enic *enic); - extern int enic_clsf_init(struct enic *enic); - extern void enic_clsf_destroy(struct enic *enic); -+uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts); -+ - #endif /* _ENIC_H_ */ -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 2a88043..6f2ada5 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -255,7 +255,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - uint16_t queue_idx, - uint16_t nb_desc, - unsigned int socket_id, -- __rte_unused const struct rte_eth_rxconf *rx_conf, -+ const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) - { - int ret; -@@ -270,6 +270,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - return ret; - } - -+ enic->rq[queue_idx].rx_free_thresh = 
rx_conf->rx_free_thresh; -+ dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx, -+ enic->rq[queue_idx].rx_free_thresh); -+ - return enicpmd_dev_setup_intr(enic); - } - -@@ -429,6 +433,9 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM; -+ device_info->default_rxconf = (struct rte_eth_rxconf) { -+ .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH -+ }; - } - - static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) -@@ -538,18 +545,6 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - return index; - } - --static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -- uint16_t nb_pkts) --{ -- struct vnic_rq *rq = (struct vnic_rq *)rx_queue; -- unsigned int work_done; -- -- if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done)) -- dev_err(enic, "error in enicpmd poll\n"); -- -- return work_done; --} -- - static const struct eth_dev_ops enicpmd_eth_dev_ops = { - .dev_configure = enicpmd_dev_configure, - .dev_start = enicpmd_dev_start, -@@ -606,7 +601,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) - enic->port_id = eth_dev->data->port_id; - enic->rte_dev = eth_dev; - eth_dev->dev_ops = &enicpmd_eth_dev_ops; -- eth_dev->rx_pkt_burst = &enicpmd_recv_pkts; -+ eth_dev->rx_pkt_burst = &enic_recv_pkts; - eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts; - - pdev = eth_dev->pci_dev; -@@ -635,8 +630,8 @@ static struct eth_driver rte_enic_pmd = { - * Register as the [Poll Mode] Driver of Cisco ENIC device. - */ - static int --rte_enic_pmd_init(const char *name __rte_unused, -- const char *params __rte_unused) -+rte_enic_pmd_init(__rte_unused const char *name, -+ __rte_unused const char *params) - { - ENICPMD_FUNC_TRACE(); - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index f818c32..9fff020 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -60,6 +60,17 @@ - #include "vnic_nic.h" - #include "enic_vnic_wq.h" - -+static inline struct rte_mbuf * -+rte_rxmbuf_alloc(struct rte_mempool *mp) -+{ -+ struct rte_mbuf *m; -+ -+ m = __rte_mbuf_raw_alloc(mp); -+ __rte_mbuf_sanity_check_raw(m, 0); -+ return m; -+} -+ -+ - static inline int enic_is_sriov_vf(struct enic *enic) - { - return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; -@@ -80,16 +91,25 @@ static int is_eth_addr_valid(uint8_t *addr) - return !is_mcast_addr(addr) && !is_zero_addr(addr); - } - --static inline struct rte_mbuf * --enic_rxmbuf_alloc(struct rte_mempool *mp) -+static void -+enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) - { -- struct rte_mbuf *m; -+ uint16_t i; - -- m = __rte_mbuf_raw_alloc(mp); -- __rte_mbuf_sanity_check_raw(m, 0); -- return m; -+ if (!rq || !rq->mbuf_ring) { -+ dev_debug(enic, "Pointer to rq or mbuf_ring is NULL"); -+ return; -+ } -+ -+ for (i = 0; i < enic->config.rq_desc_count; i++) { -+ if (rq->mbuf_ring[i]) { -+ rte_pktmbuf_free_seg(rq->mbuf_ring[i]); -+ rq->mbuf_ring[i] = NULL; -+ } -+ } - } - -+ - void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) - { - vnic_set_hdr_split_size(enic->vdev, split_hdr_size); -@@ -262,13 +282,13 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) - } - - static void --enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf) -+enic_free_rq_buf(struct rte_mbuf **mbuf) - { -- if (!buf->os_buf) -+ if (*mbuf == NULL) - return; - -- rte_pktmbuf_free((struct 
rte_mbuf *)buf->os_buf); -- buf->os_buf = NULL; -+ rte_pktmbuf_free(*mbuf); -+ mbuf = NULL; - } - - void enic_init_vnic_resources(struct enic *enic) -@@ -314,221 +334,47 @@ void enic_init_vnic_resources(struct enic *enic) - } - - --static int enic_rq_alloc_buf(struct vnic_rq *rq) -+static int -+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - { -- struct enic *enic = vnic_dev_priv(rq->vdev); -+ struct rte_mbuf *mb; -+ struct rq_enet_desc *rqd = rq->ring.descs; -+ unsigned i; - dma_addr_t dma_addr; -- struct rq_enet_desc *desc = vnic_rq_next_desc(rq); -- uint8_t type = RQ_ENET_TYPE_ONLY_SOP; -- u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev); -- struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp); -- struct rte_mbuf *hdr_mbuf = NULL; -- -- if (!mbuf) { -- dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n"); -- return -1; -- } -- -- if (unlikely(split_hdr_size)) { -- if (vnic_rq_desc_avail(rq) < 2) { -- rte_mempool_put(mbuf->pool, mbuf); -- return -1; -- } -- hdr_mbuf = enic_rxmbuf_alloc(rq->mp); -- if (!hdr_mbuf) { -- rte_mempool_put(mbuf->pool, mbuf); -- dev_err(enic, -- "hdr_mbuf alloc in enic_rq_alloc_buf failed\n"); -- return -1; -- } -- -- hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM; -- -- hdr_mbuf->nb_segs = 2; -- hdr_mbuf->port = enic->port_id; -- hdr_mbuf->next = mbuf; -- -- dma_addr = (dma_addr_t) -- (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off); -- -- rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size); - -- vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr, -- (unsigned int)split_hdr_size, 0 /*wrid*/); -+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index, -+ rq->ring.desc_count); - -- desc = vnic_rq_next_desc(rq); -- type = RQ_ENET_TYPE_NOT_SOP; -- } else { -- mbuf->nb_segs = 1; -- mbuf->port = enic->port_id; -- } -- -- mbuf->data_off = RTE_PKTMBUF_HEADROOM; -- mbuf->next = NULL; -- -- dma_addr = (dma_addr_t) -- (mbuf->buf_physaddr + mbuf->data_off); -- -- rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len); -- -- vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr, -- (unsigned int)mbuf->buf_len, 0 /*wrid*/); -- -- return 0; --} -- --static int enic_rq_indicate_buf(struct vnic_rq *rq, -- struct cq_desc *cq_desc, struct vnic_rq_buf *buf, -- int skipped, void *opaque) --{ -- struct enic *enic = vnic_dev_priv(rq->vdev); -- struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque; -- struct rte_mbuf *rx_pkt = NULL; -- struct rte_mbuf *hdr_rx_pkt = NULL; -- -- u8 type, color, eop, sop, ingress_port, vlan_stripped; -- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; -- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; -- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; -- u8 packet_error; -- u16 q_number, completed_index, bytes_written, vlan_tci, checksum; -- u32 rss_hash; -- -- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, -- &type, &color, &q_number, &completed_index, -- &ingress_port, &fcoe, &eop, &sop, &rss_type, -- &csum_not_calc, &rss_hash, &bytes_written, -- &packet_error, &vlan_stripped, &vlan_tci, &checksum, -- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, -- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, -- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, -- &fcs_ok); -- -- rx_pkt = (struct rte_mbuf *)buf->os_buf; -- buf->os_buf = NULL; -- -- if (unlikely(packet_error)) { -- dev_err(enic, "packet error\n"); -- rx_pkt->data_len = 0; -- return 0; -- } -- -- if (unlikely(skipped)) { -- rx_pkt->data_len = 0; -- return 0; -- } -- -- if (likely(!vnic_get_hdr_split_size(enic->vdev))) { 
-- /* No header split configured */ -- *rx_pkt_bucket = rx_pkt; -- rx_pkt->pkt_len = bytes_written; -- -- if (ipv4) { -- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4; -- if (!csum_not_calc) { -- if (unlikely(!ipv4_csum_ok)) -- rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD; -- -- if ((tcp || udp) && (!tcp_udp_csum_ok)) -- rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD; -- } -- } else if (ipv6) -- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6; -- } else { -- /* Header split */ -- if (sop && !eop) { -- /* This piece is header */ -- *rx_pkt_bucket = rx_pkt; -- rx_pkt->pkt_len = bytes_written; -- } else { -- if (sop && eop) { -- /* The packet is smaller than split_hdr_size */ -- *rx_pkt_bucket = rx_pkt; -- rx_pkt->pkt_len = bytes_written; -- if (ipv4) { -- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4; -- if (!csum_not_calc) { -- if (unlikely(!ipv4_csum_ok)) -- rx_pkt->ol_flags |= -- PKT_RX_IP_CKSUM_BAD; -- -- if ((tcp || udp) && -- (!tcp_udp_csum_ok)) -- rx_pkt->ol_flags |= -- PKT_RX_L4_CKSUM_BAD; -- } -- } else if (ipv6) -- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6; -- } else { -- /* Payload */ -- hdr_rx_pkt = *rx_pkt_bucket; -- hdr_rx_pkt->pkt_len += bytes_written; -- if (ipv4) { -- hdr_rx_pkt->packet_type = -- RTE_PTYPE_L3_IPV4; -- if (!csum_not_calc) { -- if (unlikely(!ipv4_csum_ok)) -- hdr_rx_pkt->ol_flags |= -- PKT_RX_IP_CKSUM_BAD; -- -- if ((tcp || udp) && -- (!tcp_udp_csum_ok)) -- hdr_rx_pkt->ol_flags |= -- PKT_RX_L4_CKSUM_BAD; -- } -- } else if (ipv6) -- hdr_rx_pkt->packet_type = -- RTE_PTYPE_L3_IPV6; -- } -+ for (i = 0; i < rq->ring.desc_count; i++, rqd++) { -+ mb = rte_rxmbuf_alloc(rq->mp); -+ if (mb == NULL) { -+ dev_err(enic, "RX mbuf alloc failed queue_id=%u", -+ (unsigned)rq->index); -+ return -ENOMEM; - } -- } - -- rx_pkt->data_len = bytes_written; -+ dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off); - -- if (rss_hash) { -- rx_pkt->ol_flags |= PKT_RX_RSS_HASH; -- rx_pkt->hash.rss = rss_hash; -+ rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, -+ mb->buf_len); -+ rq->mbuf_ring[i] = mb; - } - -- if (vlan_tci) { -- rx_pkt->ol_flags |= PKT_RX_VLAN_PKT; -- rx_pkt->vlan_tci = vlan_tci; -- } -+ /* make sure all prior writes are complete before doing the PIO write */ -+ rte_rmb(); - -- return eop; --} -+ /* Post all but the last 2 cache lines' worth of descriptors */ -+ rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE -+ / sizeof(struct rq_enet_desc)); -+ rq->rx_nb_hold = 0; - --static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, -- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque) --{ -- struct enic *enic = vnic_dev_priv(vdev); -- -- return vnic_rq_service(&enic->rq[q_number], cq_desc, -- completed_index, VNIC_RQ_RETURN_DESC, -- enic_rq_indicate_buf, opaque); -- --} -+ dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", -+ enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); -+ iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ rte_rmb(); - --int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts, -- unsigned int budget, unsigned int *work_done) --{ -- struct enic *enic = vnic_dev_priv(rq->vdev); -- unsigned int cq = enic_cq_rq(enic, rq->index); -- int err = 0; -- -- *work_done = vnic_cq_service(&enic->cq[cq], -- budget, enic_rq_service, (void *)rx_pkts); -- -- if (*work_done) { -- vnic_rq_fill(rq, enic_rq_alloc_buf); -+ return 0; - -- /* Need at least one buffer on ring to get going */ -- if (vnic_rq_desc_used(rq) == 0) { -- dev_err(enic, "Unable to alloc receive buffers\n"); -- err = -1; -- } -- } -- 
return err; - } - - static void * -@@ -576,6 +422,7 @@ enic_intr_handler(__rte_unused struct rte_intr_handle *handle, - int enic_enable(struct enic *enic) - { - unsigned int index; -+ int err; - struct rte_eth_dev *eth_dev = enic->rte_dev; - - eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); -@@ -586,15 +433,11 @@ int enic_enable(struct enic *enic) - dev_warning(enic, "Init of hash table for clsf failed."\ - "Flow director feature will not work\n"); - -- /* Fill RQ bufs */ - for (index = 0; index < enic->rq_count; index++) { -- vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf); -- -- /* Need at least one buffer on ring to get going -- */ -- if (vnic_rq_desc_used(&enic->rq[index]) == 0) { -- dev_err(enic, "Unable to alloc receive buffers\n"); -- return -1; -+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]); -+ if (err) { -+ dev_err(enic, "Failed to alloc RX queue mbufs\n"); -+ return err; - } - } - -@@ -636,6 +479,9 @@ void enic_free_rq(void *rxq) - struct vnic_rq *rq = (struct vnic_rq *)rxq; - struct enic *enic = vnic_dev_priv(rq->vdev); - -+ enic_rxmbuf_queue_release(enic, rq); -+ rte_free(rq->mbuf_ring); -+ rq->mbuf_ring = NULL; - vnic_rq_free(rq); - vnic_cq_free(&enic->cq[rq->index]); - } -@@ -664,7 +510,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, struct rte_mempool *mp, - uint16_t nb_desc) - { -- int err; -+ int rc; - struct vnic_rq *rq = &enic->rq[queue_idx]; - - rq->socket_id = socket_id; -@@ -687,23 +533,35 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - } - - /* Allocate queue resources */ -- err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx, -- enic->config.rq_desc_count, -- sizeof(struct rq_enet_desc)); -- if (err) { -+ rc = vnic_rq_alloc(enic->vdev, rq, queue_idx, -+ enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); -+ if (rc) { - dev_err(enic, "error in allocation of rq\n"); -- return err; -+ goto err_exit; - } - -- err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, -+ rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, - socket_id, enic->config.rq_desc_count, - sizeof(struct cq_enet_rq_desc)); -- if (err) { -- vnic_rq_free(rq); -+ if (rc) { - dev_err(enic, "error in allocation of cq for rq\n"); -+ goto err_free_rq_exit; - } - -- return err; -+ /* Allocate the mbuf ring */ -+ rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", -+ sizeof(struct rte_mbuf *) * enic->config.rq_desc_count, -+ RTE_CACHE_LINE_SIZE, rq->socket_id); -+ -+ if (rq->mbuf_ring != NULL) -+ return 0; -+ -+ /* cleanup on error */ -+ vnic_cq_free(&enic->cq[queue_idx]); -+err_free_rq_exit: -+ vnic_rq_free(rq); -+err_exit: -+ return -ENOMEM; - } - - void enic_free_wq(void *txq) -@@ -790,6 +648,7 @@ int enic_disable(struct enic *enic) - - for (i = 0; i < enic->wq_count; i++) - vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); -+ - for (i = 0; i < enic->rq_count; i++) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); - for (i = 0; i < enic->cq_count; i++) -@@ -1074,7 +933,7 @@ int enic_probe(struct enic *enic) - - /* Set ingress vlan rewrite mode before vnic initialization */ - err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, -- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); -+ IG_VLAN_REWRITE_MODE_PASS_THRU); - if (err) { - dev_err(enic, - "Failed to set ingress vlan rewrite mode, aborting.\n"); -diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h -index 49f7e22..33f2e84 100644 ---- a/drivers/net/enic/enic_res.h -+++ 
b/drivers/net/enic/enic_res.h -@@ -52,6 +52,7 @@ - #define ENIC_UNICAST_PERFECT_FILTERS 32 - - #define ENIC_NON_TSO_MAX_DESC 16 -+#define ENIC_DEFAULT_RX_FREE_THRESH 32 - - #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) - -@@ -133,21 +134,6 @@ static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, - WQ_ENET_OFFLOAD_MODE_TSO, - eop, 1 /* SOP */, eop, loopback); - } --static inline void enic_queue_rq_desc(struct vnic_rq *rq, -- void *os_buf, unsigned int os_buf_index, -- dma_addr_t dma_addr, unsigned int len) --{ -- struct rq_enet_desc *desc = vnic_rq_next_desc(rq); -- u64 wrid = 0; -- u8 type = os_buf_index ? -- RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; -- -- rq_enet_desc_enc(desc, -- (u64)dma_addr | VNIC_PADDR_TARGET, -- type, (u16)len); -- -- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); --} - - struct enic; - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -new file mode 100644 -index 0000000..945a60f ---- /dev/null -+++ b/drivers/net/enic/enic_rx.c -@@ -0,0 +1,370 @@ -+/* -+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. -+ * Copyright 2007 Nuova Systems, Inc. All rights reserved. -+ * -+ * Copyright (c) 2014, Cisco Systems, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * 1. Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * -+ * 2. Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "enic_compat.h" -+#include "rq_enet_desc.h" -+#include "enic.h" -+ -+#define RTE_PMD_USE_PREFETCH -+ -+#ifdef RTE_PMD_USE_PREFETCH -+/* -+ * Prefetch a cache line into all cache levels. 
-+ */ -+#define rte_enic_prefetch(p) rte_prefetch0(p) -+#else -+#define rte_enic_prefetch(p) do {} while (0) -+#endif -+ -+#ifdef RTE_PMD_PACKET_PREFETCH -+#define rte_packet_prefetch(p) rte_prefetch1(p) -+#else -+#define rte_packet_prefetch(p) do {} while (0) -+#endif -+ -+static inline struct rte_mbuf * -+rte_rxmbuf_alloc(struct rte_mempool *mp) -+{ -+ struct rte_mbuf *m; -+ -+ m = __rte_mbuf_raw_alloc(mp); -+ __rte_mbuf_sanity_check_raw(m, 0); -+ return m; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) -+{ -+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) -+{ -+ return(le16_to_cpu(crd->bytes_written_flags) & -+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_packet_error(uint16_t bwflags) -+{ -+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == -+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_eop(uint16_t ciflags) -+{ -+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) -+ == CQ_ENET_RQ_DESC_FLAGS_EOP; -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) -+{ -+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> -+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); -+} -+ -+static inline uint32_t -+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) -+{ -+ return le32_to_cpu(cqrd->rss_hash); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_FCS_OK); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) -+{ -+ return le16_to_cpu(cqrd->vlan); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ return le16_to_cpu(cqrd->bytes_written_flags) & -+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; -+} -+ -+static inline uint64_t -+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t bwflags; -+ uint64_t pkt_err_flags = 0; -+ -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ -+ /* Check for packet error. Can't be more specific than MAC error */ -+ if (enic_cq_rx_desc_packet_error(bwflags)) { -+ pkt_err_flags |= PKT_RX_MAC_ERR; -+ } -+ -+ /* Check for bad FCS. MAC error isn't quite, but no other choice */ -+ if (!enic_cq_rx_desc_fcs_ok(cqrd)) { -+ pkt_err_flags |= PKT_RX_MAC_ERR; -+ } -+ return pkt_err_flags; -+} -+ -+/* -+ * Lookup table to translate RX CQ flags to mbuf flags. 
-+ */ -+static inline uint32_t -+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint8_t cqrd_flags = cqrd->flags; -+ static const uint32_t cq_type_table[128] __rte_cache_aligned = { -+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_UDP, -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_TCP, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_FRAG, -+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, -+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_UDP, -+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_TCP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_FRAG, -+ /* All others reserved */ -+ }; -+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT -+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 -+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; -+ return cq_type_table[cqrd_flags]; -+} -+ -+static inline void -+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t ciflags, bwflags, pkt_flags = 0; -+ ciflags = enic_cq_rx_desc_ciflags(cqrd); -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ -+ ASSERT(mbuf->ol_flags == 0); -+ -+ /* flags are meaningless if !EOP */ -+ if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -+ goto mbuf_flags_done; -+ -+ /* VLAN stripping */ -+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); -+ } else { -+ mbuf->vlan_tci = 0; -+ } -+ -+ /* RSS flag */ -+ if (enic_cq_rx_desc_rss_type(cqrd)) { -+ pkt_flags |= PKT_RX_RSS_HASH; -+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); -+ } -+ -+ /* checksum flags */ -+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) && -+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_IP_CKSUM_BAD; -+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_L4_CKSUM_BAD; -+ } -+ } -+ -+ mbuf_flags_done: -+ mbuf->ol_flags = pkt_flags; -+} -+ -+static inline uint32_t -+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -+{ -+ uint32_t d = i0 + i1; -+ ASSERT(i0 < n_descriptors); -+ ASSERT(i1 < n_descriptors); -+ d -= (d >= n_descriptors) ? 
n_descriptors : 0; -+ return d; -+} -+ -+ -+uint16_t -+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts) -+{ -+ struct vnic_rq *rq = rx_queue; -+ struct enic *enic = vnic_dev_priv(rq->vdev); -+ unsigned int rx_id; -+ struct rte_mbuf *nmb, *rxmb; -+ uint16_t nb_rx = 0; -+ uint16_t nb_hold; -+ struct vnic_cq *cq; -+ volatile struct cq_desc *cqd_ptr; -+ uint8_t color; -+ -+ cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ -+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -+ -+ while (nb_rx < nb_pkts) { -+ uint16_t rx_pkt_len; -+ volatile struct rq_enet_desc *rqd_ptr; -+ dma_addr_t dma_addr; -+ struct cq_desc cqd; -+ uint64_t ol_err_flags; -+ -+ /* Check for pkts available */ -+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -+ & CQ_DESC_COLOR_MASK; -+ if (color == cq->last_color) -+ break; -+ -+ /* Get the cq descriptor and rq pointer */ -+ cqd = *cqd_ptr; -+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -+ -+ /* allocate a new mbuf */ -+ nmb = rte_rxmbuf_alloc(rq->mp); -+ if (nmb == NULL) { -+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -+ enic->port_id, (unsigned)rq->index); -+ rte_eth_devices[enic->port_id]. -+ data->rx_mbuf_alloc_failed++; -+ break; -+ } -+ -+ /* Check for FCS or packet errors */ -+ ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd); -+ if (ol_err_flags == 0) -+ rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -+ else -+ rx_pkt_len = 0; -+ -+ /* Get the mbuf to return and replace with one just allocated */ -+ rxmb = rq->mbuf_ring[rx_id]; -+ rq->mbuf_ring[rx_id] = nmb; -+ -+ /* Increment cqd, rqd, mbuf_table index */ -+ rx_id++; -+ if (unlikely(rx_id == rq->ring.desc_count)) { -+ rx_id = 0; -+ cq->last_color = cq->last_color ? 
0 : 1; -+ } -+ -+ /* Prefetch next mbuf & desc while processing current one */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ rte_enic_prefetch(cqd_ptr); -+ rte_enic_prefetch(rq->mbuf_ring[rx_id]); -+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -+ + rx_id); -+ -+ /* Push descriptor for newly allocated mbuf */ -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off); -+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len); -+ -+ /* Fill in the rest of the mbuf */ -+ rxmb->data_off = RTE_PKTMBUF_HEADROOM; -+ rxmb->nb_segs = 1; -+ rxmb->next = NULL; -+ rxmb->pkt_len = rx_pkt_len; -+ rxmb->data_len = rx_pkt_len; -+ rxmb->port = enic->port_id; -+ rxmb->ol_flags = ol_err_flags; -+ if (!ol_err_flags) -+ enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -+ -+ /* prefetch mbuf data for caller */ -+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -+ RTE_PKTMBUF_HEADROOM)); -+ -+ /* store the mbuf address into the next entry of the array */ -+ rx_pkts[nb_rx++] = rxmb; -+ } -+ -+ nb_hold += nb_rx; -+ cq->to_clean = rx_id; -+ -+ if (nb_hold > rq->rx_free_thresh) { -+ rq->posted_index = enic_ring_add(rq->ring.desc_count, -+ rq->posted_index, nb_hold); -+ nb_hold = 0; -+ rte_mb(); -+ iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ } -+ -+ rq->rx_nb_hold = nb_hold; -+ -+ return nb_rx; -+} --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch b/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch deleted file mode 100644 index 218a42f6..00000000 --- a/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch +++ /dev/null @@ -1,39 +0,0 @@ -From a31a1dbdf5e1ff46d04f50fea02e83453b84652c Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 8 Mar 2016 10:49:07 -0800 -Subject: [PATCH 12/22] enic: fix last packet not being sent - - The last packet of the tx burst function array was not being - emitted until the subsequent call. The nic descriptor index - was being set to the current tx descriptor instead of one past - the descriptor as required by the nic. 
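In other words, the doorbell value must be the index one past the last descriptor the driver filled, so the software ring state has to be advanced before it is posted. A hedged sketch of that ordering follows; the example_wq structure and its field names are invented for illustration, and rte_wmb() is the DPDK write barrier.

#include <stdint.h>
#include <rte_atomic.h> /* rte_wmb() */

/* Hypothetical software view of a TX queue, reduced to what the sketch needs. */
struct example_wq {
	uint32_t head;               /* next free descriptor index */
	uint32_t desc_avail;
	uint32_t ring_size;
	volatile uint32_t *doorbell; /* NIC posted-index register */
};

static inline void
example_post_tx(struct example_wq *wq, uint32_t n_filled)
{
	wq->desc_avail -= n_filled;
	wq->head = (wq->head + n_filled) % wq->ring_size; /* one past the last filled slot */

	rte_wmb();               /* descriptors visible before the doorbell */
	*wq->doorbell = wq->head; /* NIC transmits everything below head */
}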
- - Fixes: d739ba4c6abf ("enic: improve Tx packet rate") - - Signed-off-by: John Daley ---- - drivers/net/enic/base/enic_vnic_wq.h | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h -index e3ea574..b019109 100644 ---- a/drivers/net/enic/base/enic_vnic_wq.h -+++ b/drivers/net/enic/base/enic_vnic_wq.h -@@ -69,11 +69,11 @@ static inline void enic_vnic_post_wq(struct vnic_wq *wq, - buf->wr_id = wrid; - - buf = buf->next; -- if (cq_entry) -- enic_vnic_post_wq_index(wq); -+ wq->ring.desc_avail -= desc_skip_cnt; - wq->to_use = buf; - -- wq->ring.desc_avail -= desc_skip_cnt; -+ if (cq_entry) -+ enic_vnic_post_wq_index(wq); - } - - #endif /* _ENIC_VNIC_WQ_H_ */ --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch b/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch deleted file mode 100644 index 97a424f1..00000000 --- a/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch +++ /dev/null @@ -1,47 +0,0 @@ -From a1ed99bc24f88f061d75eed0db84dc6355855dd2 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Thu, 17 Mar 2016 15:48:13 -0700 -Subject: [PATCH 13/22] enic: add missing newline to print statements - - Add the missing '\n' character to the end of a few print statements. - - Fixes: fefed3d1e62c ("enic: new driver") - - Signed-off-by: Nelson Escobar - Acked-by: John Daley ---- - drivers/net/enic/enic_main.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 9fff020..e30672c 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -342,13 +342,13 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - unsigned i; - dma_addr_t dma_addr; - -- dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index, -+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, - rq->ring.desc_count); - - for (i = 0; i < rq->ring.desc_count; i++, rqd++) { - mb = rte_rxmbuf_alloc(rq->mp); - if (mb == NULL) { -- dev_err(enic, "RX mbuf alloc failed queue_id=%u", -+ dev_err(enic, "RX mbuf alloc failed queue_id=%u\n", - (unsigned)rq->index); - return -ENOMEM; - } -@@ -388,7 +388,7 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size, - rz = rte_memzone_reserve_aligned((const char *)name, - size, SOCKET_ID_ANY, 0, ENIC_ALIGN); - if (!rz) { -- pr_err("%s : Failed to allocate memory requested for %s", -+ pr_err("%s : Failed to allocate memory requested for %s\n", - __func__, name); - return NULL; - } --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch b/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch deleted file mode 100644 index cae055eb..00000000 --- a/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch +++ /dev/null @@ -1,171 +0,0 @@ -From fef2b892245d5a2f3c68d2e03a6c5f2a40205cf7 Mon Sep 17 00:00:00 2001 -From: Steve Shin -Date: Wed, 23 Mar 2016 09:54:54 -0700 -Subject: [PATCH 14/22] vmxnet3: support jumbo frames - ---- - drivers/net/vmxnet3/vmxnet3_ethdev.c | 3 +- - drivers/net/vmxnet3/vmxnet3_ring.h | 2 + - drivers/net/vmxnet3/vmxnet3_rxtx.c | 77 ++++++++++++++++++++++-------------- - 3 files changed, 52 insertions(+), 30 deletions(-) - -diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c -index c363bf6..b78acd4 100644 ---- 
a/drivers/net/vmxnet3/vmxnet3_ethdev.c -+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c -@@ -425,6 +425,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) - { - struct rte_eth_conf port_conf = dev->data->dev_conf; - struct vmxnet3_hw *hw = dev->data->dev_private; -+ uint32_t mtu = dev->data->mtu; - Vmxnet3_DriverShared *shared = hw->shared; - Vmxnet3_DSDevRead *devRead = &shared->devRead; - uint32_t *mac_ptr; -@@ -442,7 +443,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) - devRead->misc.driverInfo.vmxnet3RevSpt = 1; - devRead->misc.driverInfo.uptVerSpt = 1; - -- devRead->misc.mtu = rte_le_to_cpu_32(dev->data->mtu); -+ devRead->misc.mtu = rte_le_to_cpu_32(mtu); - devRead->misc.queueDescPA = hw->queueDescPA; - devRead->misc.queueDescLen = hw->queue_desc_len; - devRead->misc.numTxQueues = hw->num_tx_queues; -diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h -index 612487e..b1582f8 100644 ---- a/drivers/net/vmxnet3/vmxnet3_ring.h -+++ b/drivers/net/vmxnet3/vmxnet3_ring.h -@@ -171,6 +171,8 @@ typedef struct vmxnet3_rx_queue { - uint32_t qid1; - uint32_t qid2; - Vmxnet3_RxQueueDesc *shared; -+ struct rte_mbuf *start_seg; -+ struct rte_mbuf *last_seg; - struct vmxnet3_rxq_stats stats; - bool stopped; - uint16_t queue_id; /**< Device RX queue index. */ -diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c -index c76b230..59b6db8 100644 ---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c -+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c -@@ -547,7 +547,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - vmxnet3_rx_queue_t *rxq; - Vmxnet3_RxCompDesc *rcd; - vmxnet3_buf_info_t *rbi; -- Vmxnet3_RxDesc *rxd; - struct rte_mbuf *rxm = NULL; - struct vmxnet3_hw *hw; - -@@ -572,37 +571,16 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - - idx = rcd->rxdIdx; - ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1); -- rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx; - rbi = rxq->cmd_ring[ring_idx].buf_info + idx; - -- if (unlikely(rcd->sop != 1 || rcd->eop != 1)) { -- rte_pktmbuf_free_seg(rbi->m); -- PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)"); -- goto rcd_done; -- } -- - PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx); - -+ #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER -+ Vmxnet3_RxDesc *rxd -+ = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx; - VMXNET3_ASSERT(rcd->len <= rxd->len); - VMXNET3_ASSERT(rbi->m); -- -- if (unlikely(rcd->len == 0)) { -- PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)", -- ring_idx, idx); -- VMXNET3_ASSERT(rcd->sop && rcd->eop); -- rte_pktmbuf_free_seg(rbi->m); -- goto rcd_done; -- } -- -- /* Assuming a packet is coming in a single packet buffer */ -- if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) { -- PMD_RX_LOG(DEBUG, -- "Alert : Misbehaving device, incorrect " -- " buffer type used. 
iPacket dropped."); -- rte_pktmbuf_free_seg(rbi->m); -- goto rcd_done; -- } -- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); -+ #endif - - /* Get the packet buffer pointer from buf_info */ - rxm = rbi->m; -@@ -615,7 +593,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxq->cmd_ring[ring_idx].next2comp = idx; - - /* For RCD with EOP set, check if there is frame error */ -- if (unlikely(rcd->err)) { -+ if (unlikely(rcd->eop && rcd->err)) { - rxq->stats.drop_total++; - rxq->stats.drop_err++; - -@@ -642,9 +620,49 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->vlan_tci = 0; - rxm->packet_type = RTE_PTYPE_UNKNOWN; - -- vmxnet3_rx_offload(rcd, rxm); -+ /* -+ * If this is the first buffer of the received packet, -+ * set the pointer to the first mbuf of the packet -+ * Otherwise, update the total length and the number of segments -+ * of the current scattered packet, and update the pointer to -+ * the last mbuf of the current packet. -+ */ -+ if (rcd->sop) { -+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER -+ VMXNET3_ASSERT(!rxq->start_seg); -+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); -+#endif -+ -+ if (unlikely(rcd->len == 0)) { -+ PMD_RX_LOG(DEBUG, -+ "Rx buf was skipped. rxring[%d][%d])", -+ ring_idx, idx); -+ rte_pktmbuf_free_seg(rbi->m); -+ goto rcd_done; -+ } -+ -+ rxq->start_seg = rxm; -+ vmxnet3_rx_offload(rcd, rxm); -+ } else { -+ struct rte_mbuf *start = rxq->start_seg; -+ -+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER -+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); -+ VMXNET3_ASSERT(start != NULL); -+#endif -+ -+ start->pkt_len += rxm->data_len; -+ start->nb_segs++; -+ -+ rxq->last_seg->next = rxm; -+ } -+ rxq->last_seg = rxm; -+ -+ if (rcd->eop) { -+ rx_pkts[nb_rx++] = rxq->start_seg; -+ rxq->start_seg = NULL; -+ } - -- rx_pkts[nb_rx++] = rxm; - rcd_done: - rxq->cmd_ring[ring_idx].next2comp = idx; - VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size); -@@ -945,6 +963,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) - } - } - rxq->stopped = FALSE; -+ rxq->start_seg = NULL; - } - - for (i = 0; i < dev->data->nb_tx_queues; i++) { --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch b/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch deleted file mode 100644 index 2c0e65d3..00000000 --- a/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 7a7fa2891df4ec4af0c34f3bbd203e1376e83951 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Thu, 17 Mar 2016 15:49:58 -0700 -Subject: [PATCH 15/22] enic: fix crash when allocating too many queues - - Add checks to make sure we don't try to allocate more tx or rx queues - than we support. - - Fixes: fefed3d1e62c ("enic: new driver") - - Signed-off-by: Nelson Escobar - Reviewed-by: John Daley ---- - drivers/net/enic/enic_ethdev.c | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 6f2ada5..6c3c734 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -174,6 +174,13 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, - struct enic *enic = pmd_priv(eth_dev); - - ENICPMD_FUNC_TRACE(); -+ if (queue_idx >= ENIC_WQ_MAX) { -+ dev_err(enic, -+ "Max number of TX queues exceeded. 
Max is %d\n", -+ ENIC_WQ_MAX); -+ return -EINVAL; -+ } -+ - eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx]; - - ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); -@@ -262,6 +269,13 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - struct enic *enic = pmd_priv(eth_dev); - - ENICPMD_FUNC_TRACE(); -+ if (queue_idx >= ENIC_RQ_MAX) { -+ dev_err(enic, -+ "Max number of RX queues exceeded. Max is %d\n", -+ ENIC_RQ_MAX); -+ return -EINVAL; -+ } -+ - eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx]; - - ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc); --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch b/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch deleted file mode 100644 index 895d5715..00000000 --- a/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 3ffb9431d6ba34dbcffab5cff4c060d5dca167e1 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Thu, 17 Mar 2016 15:57:05 -0700 -Subject: [PATCH 16/22] enic: fix mbuf flags on Rx - - In the receive path, the function to set mbuf ol_flags used the - mbuf packet_type before it was set. - - Fixes: 947d860c821f ("enic: improve Rx performance") - - Signed-off-by: John Daley ---- - drivers/net/enic/enic_rx.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -index 945a60f..59ebaa4 100644 ---- a/drivers/net/enic/enic_rx.c -+++ b/drivers/net/enic/enic_rx.c -@@ -210,7 +210,7 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) - ciflags = enic_cq_rx_desc_ciflags(cqrd); - bwflags = enic_cq_rx_desc_bwflags(cqrd); - -- ASSERT(mbuf->ol_flags == 0); -+ mbuf->ol_flags = 0; - - /* flags are meaningless if !EOP */ - if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -@@ -340,10 +340,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxmb->pkt_len = rx_pkt_len; - rxmb->data_len = rx_pkt_len; - rxmb->port = enic->port_id; -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - rxmb->ol_flags = ol_err_flags; - if (!ol_err_flags) - enic_cq_rx_to_pkt_flags(&cqd, rxmb); -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - - /* prefetch mbuf data for caller */ - rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch b/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch deleted file mode 100644 index 28c05e88..00000000 --- a/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch +++ /dev/null @@ -1,117 +0,0 @@ -From 678e5952cf49bb66c2d697581a70dc8c7d703e8f Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Thu, 17 Mar 2016 15:57:06 -0700 -Subject: [PATCH 17/22] enic: fix error packets handling - - If the packet_error bit in the completion descriptor is set, the - remainder of the descriptor and data are invalid. PKT_RX_MAC_ERR - was set in the mbuf->ol_flags if packet_error was set and used - later to indicate an error packet. But since PKT_RX_MAC_ERR is - defined as 0, mbuf flags and packet types and length were being - misinterpreted. - - Make the function enic_cq_rx_to_pkt_err_flags() return true for error - packets and use the return value instead of mbuf->ol_flags to indicate - error packets. Also remove warning for error packets and rely on - rx_error stats. 
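The underlying point is that a flag macro defined as 0 can never be tested back out of ol_flags, so the error indication has to travel through an explicit return value. A minimal sketch of the resulting helper shape (EXAMPLE_FLAG_TRUNCATED is an illustrative descriptor bit; the real check in the patch goes through CQ_ENET_RQ_DESC_FLAGS_TRUNCATED):

#include <stdint.h>
#include <rte_mbuf.h> /* PKT_RX_MAC_ERR, defined as 0 in this DPDK version */

#define EXAMPLE_FLAG_TRUNCATED (1u << 14) /* illustrative bit position */

/* Sketch: report the error through the return value; the 0-valued
 * PKT_RX_MAC_ERR may still be written for the application, but the
 * caller must not key off it. */
static inline uint8_t
example_rx_err_flags(uint16_t bwflags, uint64_t *pkt_err_flags_out)
{
	uint8_t error = 0;

	*pkt_err_flags_out = 0;
	if (bwflags & EXAMPLE_FLAG_TRUNCATED) {
		*pkt_err_flags_out = PKT_RX_MAC_ERR;
		error = 1;
	}
	return error;
}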
- - Fixes: 947d860c821f ("enic: improve Rx performance") - - Signed-off-by: John Daley ---- - drivers/net/enic/enic_rx.c | 43 ++++++++++++++++++------------------------- - 1 file changed, 18 insertions(+), 25 deletions(-) - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -index 59ebaa4..817a891 100644 ---- a/drivers/net/enic/enic_rx.c -+++ b/drivers/net/enic/enic_rx.c -@@ -129,13 +129,6 @@ enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) - return le32_to_cpu(cqrd->rss_hash); - } - --static inline uint8_t --enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd) --{ -- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) == -- CQ_ENET_RQ_DESC_FLAGS_FCS_OK); --} -- - static inline uint16_t - enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) - { -@@ -150,25 +143,21 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) - CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; - } - --static inline uint64_t --enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd) -+static inline uint8_t -+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) - { - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; - uint16_t bwflags; -+ int ret = 0; - uint64_t pkt_err_flags = 0; - - bwflags = enic_cq_rx_desc_bwflags(cqrd); -- -- /* Check for packet error. Can't be more specific than MAC error */ -- if (enic_cq_rx_desc_packet_error(bwflags)) { -- pkt_err_flags |= PKT_RX_MAC_ERR; -- } -- -- /* Check for bad FCS. MAC error isn't quite, but no other choice */ -- if (!enic_cq_rx_desc_fcs_ok(cqrd)) { -- pkt_err_flags |= PKT_RX_MAC_ERR; -+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { -+ pkt_err_flags = PKT_RX_MAC_ERR; -+ ret = 1; - } -- return pkt_err_flags; -+ *pkt_err_flags_out = pkt_err_flags; -+ return ret; - } - - /* -@@ -282,6 +271,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - dma_addr_t dma_addr; - struct cq_desc cqd; - uint64_t ol_err_flags; -+ uint8_t packet_error; - - /* Check for pkts available */ - color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -@@ -303,9 +293,9 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - break; - } - -- /* Check for FCS or packet errors */ -- ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd); -- if (ol_err_flags == 0) -+ /* A packet error means descriptor and data are untrusted */ -+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -+ if (!packet_error) - rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd); - else - rx_pkt_len = 0; -@@ -340,10 +330,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxmb->pkt_len = rx_pkt_len; - rxmb->data_len = rx_pkt_len; - rxmb->port = enic->port_id; -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -- rxmb->ol_flags = ol_err_flags; -- if (!ol_err_flags) -+ if (!packet_error) { -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ } else { -+ rxmb->packet_type = 0; -+ rxmb->ol_flags = 0; -+ } - - /* prefetch mbuf data for caller */ - rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch b/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch deleted file mode 100644 index 3f29f6ea..00000000 --- a/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 2fa6a45ff9f9fb3108b09403e32393416bd0a732 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Thu, 17 Mar 2016 15:57:07 -0700 -Subject: [PATCH 18/22] enic: remove packet error conditional - - 
small cleanup to remove conditional. - - Signed-off-by: John Daley ---- - drivers/net/enic/enic_rx.c | 10 +++------- - 1 file changed, 3 insertions(+), 7 deletions(-) - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -index 817a891..232987a 100644 ---- a/drivers/net/enic/enic_rx.c -+++ b/drivers/net/enic/enic_rx.c -@@ -266,7 +266,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - nb_hold = rq->rx_nb_hold; /* mbufs held by software */ - - while (nb_rx < nb_pkts) { -- uint16_t rx_pkt_len; - volatile struct rq_enet_desc *rqd_ptr; - dma_addr_t dma_addr; - struct cq_desc cqd; -@@ -295,10 +294,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - - /* A packet error means descriptor and data are untrusted */ - packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -- if (!packet_error) -- rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -- else -- rx_pkt_len = 0; - - /* Get the mbuf to return and replace with one just allocated */ - rxmb = rq->mbuf_ring[rx_id]; -@@ -327,16 +322,17 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxmb->data_off = RTE_PKTMBUF_HEADROOM; - rxmb->nb_segs = 1; - rxmb->next = NULL; -- rxmb->pkt_len = rx_pkt_len; -- rxmb->data_len = rx_pkt_len; - rxmb->port = enic->port_id; - if (!packet_error) { -+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); - rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - enic_cq_rx_to_pkt_flags(&cqd, rxmb); - } else { -+ rxmb->pkt_len = 0; - rxmb->packet_type = 0; - rxmb->ol_flags = 0; - } -+ rxmb->data_len = rxmb->pkt_len; - - /* prefetch mbuf data for caller */ - rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch b/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch deleted file mode 100644 index 74151772..00000000 --- a/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 8ad252ab40b8f95db8413220146d54bf8a7d7be8 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Fri, 18 Mar 2016 11:27:07 -0700 -Subject: [PATCH 19/22] enic: update maintainers - - Change maintainers for ENIC PMD and fix pointer to enic - documentation in MAINTAINERS. 
- - Signed-off-by: John Daley ---- - MAINTAINERS | 3 ++- - doc/guides/nics/enic.rst | 2 +- - 2 files changed, 3 insertions(+), 2 deletions(-) - -diff --git a/MAINTAINERS b/MAINTAINERS -index b90aeea..f5b8bb4 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -264,8 +264,9 @@ F: doc/guides/nics/cxgbe.rst - - Cisco enic - M: John Daley --M: Sujith Sankar -+M: Nelson Escobar - F: drivers/net/enic/ -+F: doc/guides/nics/enic.rst - - Combo szedata2 - M: Matej Vido -diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst -index 2a228fd..e67c3db 100644 ---- a/doc/guides/nics/enic.rst -+++ b/doc/guides/nics/enic.rst -@@ -218,4 +218,4 @@ Any questions or bugs should be reported to DPDK community and to the ENIC PMD - maintainers: - - - John Daley --- Sujith Sankar -+- Nelson Escobar --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch b/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch deleted file mode 100644 index db2ac646..00000000 --- a/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch +++ /dev/null @@ -1,66 +0,0 @@ -From ce6badc60736f5e78a295f30fe84c3e40ad0c330 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Fri, 18 Mar 2016 11:33:34 -0700 -Subject: [PATCH 20/22] enic: fix Rx descriptor limit - - On initialization, the rq descriptor count was set to the limit - of the vic. When the requested number of rx descriptors was - less than this count, enic_alloc_rq() was incorrectly setting - the count to the lower value. This results in later calls to - enic_alloc_rq() incorrectly using the lower value as the adapter - limit. - - Fixes: fefed3d1e62c ("enic: new driver") - - Signed-off-by: Nelson Escobar - Reviewed-by: John Daley ---- - drivers/net/enic/enic_main.c | 14 ++++++-------- - 1 file changed, 6 insertions(+), 8 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index e30672c..2f79cf0 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -524,24 +524,22 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - "policy. 
Applying the value in the adapter "\ - "policy (%d).\n", - queue_idx, nb_desc, enic->config.rq_desc_count); -- } else if (nb_desc != enic->config.rq_desc_count) { -- enic->config.rq_desc_count = nb_desc; -- dev_info(enic, -- "RX Queues - effective number of descs:%d\n", -- nb_desc); -+ nb_desc = enic->config.rq_desc_count; - } -+ dev_info(enic, "RX Queues - effective number of descs:%d\n", -+ nb_desc); - } - - /* Allocate queue resources */ - rc = vnic_rq_alloc(enic->vdev, rq, queue_idx, -- enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); -+ nb_desc, sizeof(struct rq_enet_desc)); - if (rc) { - dev_err(enic, "error in allocation of rq\n"); - goto err_exit; - } - - rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, -- socket_id, enic->config.rq_desc_count, -+ socket_id, nb_desc, - sizeof(struct cq_enet_rq_desc)); - if (rc) { - dev_err(enic, "error in allocation of cq for rq\n"); -@@ -550,7 +548,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - - /* Allocate the mbuf ring */ - rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", -- sizeof(struct rte_mbuf *) * enic->config.rq_desc_count, -+ sizeof(struct rte_mbuf *) * nb_desc, - RTE_CACHE_LINE_SIZE, rq->socket_id); - - if (rq->mbuf_ring != NULL) --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch b/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch deleted file mode 100644 index 154e6f1e..00000000 --- a/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch +++ /dev/null @@ -1,89 +0,0 @@ -From e89ea2a038987102d9eb0a7ea217d7a301b484cb Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Thu, 24 Mar 2016 14:00:39 -0700 -Subject: [PATCH 21/22] enic: fix TX hang when number of packets > queue - size - - If the nb_pkts parameter to rte_eth_tx_burst() was greater than - the TX descriptor count, a completion was not being requested - from the NIC, so descriptors would not be released back to the - host causing a lock-up. - - Introduce a limit of how many TX descriptors can be used in a single - call to the enic PMD burst TX function before requesting a completion. 
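A standalone sketch of the completion-request policy described above (assumptions: TX_POST_THRESH, post_desc() and the two-level loop are illustrative stand-ins, not the enic transmit path): a completion is requested on the last segment of a packet either when the batch ends or when the descriptors consumed since the last request cross a threshold, so the NIC keeps returning descriptors to the host even for bursts larger than the ring.

#include <stdint.h>

/* post_desc() stands in for writing one hardware descriptor; cq_entry
 * asks the NIC to write a completion entry for this descriptor. */
static void post_desc(int eop, int cq_entry) { (void)eop; (void)cq_entry; }

#define TX_POST_THRESH 32	/* placeholder; the patch uses ENIC_MIN_WQ_DESCS / 2 */

static void
xmit_burst(unsigned int nb_pkts, unsigned int segs_per_pkt)
{
	unsigned int used = 0;	/* descriptors since last completion request */
	unsigned int i, s;

	for (i = 0; i < nb_pkts; i++) {
		for (s = 0; s < segs_per_pkt; s++) {
			int eop = (s == segs_per_pkt - 1);
			int cq_entry = 0;

			used++;
			if (eop && (i == nb_pkts - 1 || used > TX_POST_THRESH)) {
				cq_entry = 1;	/* NIC reports progress here */
				used = 0;
			}
			post_desc(eop, cq_entry);
		}
	}
}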
- - Fixes: d739ba4c6abf ("enic: improve Tx packet rate") - - Signed-off-by: John Daley ---- - drivers/net/enic/enic_ethdev.c | 20 ++++++++++++++++---- - drivers/net/enic/enic_res.h | 1 + - 2 files changed, 17 insertions(+), 4 deletions(-) - -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 6c3c734..61bb83c 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -510,7 +510,7 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui - static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) - { -- unsigned int index; -+ uint16_t index; - unsigned int frags; - unsigned int pkt_len; - unsigned int seg_len; -@@ -522,6 +522,7 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - unsigned short vlan_id; - unsigned short ol_flags; - uint8_t last_seg, eop; -+ unsigned int host_tx_descs = 0; - - for (index = 0; index < nb_pkts; index++) { - tx_pkt = *tx_pkts++; -@@ -537,6 +538,7 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - return index; - } - } -+ - pkt_len = tx_pkt->pkt_len; - vlan_id = tx_pkt->vlan_tci; - ol_flags = tx_pkt->ol_flags; -@@ -546,9 +548,19 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - next_tx_pkt = tx_pkt->next; - seg_len = tx_pkt->data_len; - inc_len += seg_len; -- eop = (pkt_len == inc_len) || (!next_tx_pkt); -- last_seg = eop && -- (index == ((unsigned int)nb_pkts - 1)); -+ -+ host_tx_descs++; -+ last_seg = 0; -+ eop = 0; -+ if ((pkt_len == inc_len) || !next_tx_pkt) { -+ eop = 1; -+ /* post if last packet in batch or > thresh */ -+ if ((index == (nb_pkts - 1)) || -+ (host_tx_descs > ENIC_TX_POST_THRESH)) { -+ last_seg = 1; -+ host_tx_descs = 0; -+ } -+ } - enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len, - !frags, eop, last_seg, ol_flags, vlan_id); - tx_pkt = next_tx_pkt; -diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h -index 33f2e84..00fa71d 100644 ---- a/drivers/net/enic/enic_res.h -+++ b/drivers/net/enic/enic_res.h -@@ -53,6 +53,7 @@ - - #define ENIC_NON_TSO_MAX_DESC 16 - #define ENIC_DEFAULT_RX_FREE_THRESH 32 -+#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2) - - #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) - --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch b/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch deleted file mode 100644 index 3e03c896..00000000 --- a/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch +++ /dev/null @@ -1,76 +0,0 @@ -From a2f08a919c72af29c56b937e6c92eb104037fed5 Mon Sep 17 00:00:00 2001 -From: Nelson Escobar -Date: Tue, 22 Mar 2016 13:42:08 -0700 -Subject: [PATCH 22/22] bonding: fix bond link detect in non-interrupt mode - - Stopping then re-starting a bond interface containing slaves that - used polling for link detection caused the bond to think all slave - links were down and inactive. - - Move the start of the polling for link from slave_add() to - bond_ethdev_start() and in bond_ethdev_stop() make sure we clear - the last_link_status of the slaves. 
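A simplified sketch of the life-cycle change described above (struct and function names here are illustrative, not the bonding PMD's): adding a member only records whether it lacks link interrupts, start() arms polling if any such member exists, and stop() clears both the polling flag and each member's cached link status so a later start() re-detects links correctly.

struct member {
	int needs_poll;		/* device has no link-status interrupt */
	int last_link_status;	/* cached result of the previous poll */
};

struct bond {
	struct member m[8];
	unsigned int n;
	int polling_enabled;
};

static void bond_start(struct bond *b)
{
	unsigned int i;

	for (i = 0; i < b->n; i++)
		if (b->m[i].needs_poll)
			b->polling_enabled = 1;
	/* if (b->polling_enabled)
	 *	arm a periodic link poll, e.g. via rte_eal_alarm_set(); */
}

static void bond_stop(struct bond *b)
{
	unsigned int i;

	b->polling_enabled = 0;
	for (i = 0; i < b->n; i++)
		b->m[i].last_link_status = 0;	/* forget stale link state */
}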
- - Signed-off-by: Nelson Escobar - Signed-off-by: John Daley ---- - drivers/net/bonding/rte_eth_bond_pmd.c | 27 +++++++++++++++++---------- - 1 file changed, 17 insertions(+), 10 deletions(-) - -diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c -index b1373c6..d32c6f5 100644 ---- a/drivers/net/bonding/rte_eth_bond_pmd.c -+++ b/drivers/net/bonding/rte_eth_bond_pmd.c -@@ -1447,18 +1447,11 @@ slave_add(struct bond_dev_private *internals, - slave_details->port_id = slave_eth_dev->data->port_id; - slave_details->last_link_status = 0; - -- /* If slave device doesn't support interrupts then we need to enabled -- * polling to monitor link status */ -+ /* Mark slave devices that don't support interrupts so we can -+ * compensate when we start the bond -+ */ - if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { - slave_details->link_status_poll_enabled = 1; -- -- if (!internals->link_status_polling_enabled) { -- internals->link_status_polling_enabled = 1; -- -- rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000, -- bond_ethdev_slave_link_status_change_monitor, -- (void *)&rte_eth_devices[internals->port_id]); -- } - } - - slave_details->link_status_wait_to_complete = 0; -@@ -1543,6 +1536,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) - eth_dev->data->port_id, internals->slaves[i].port_id); - return -1; - } -+ /* We will need to poll for link status if any slave doesn't -+ * support interrupts -+ */ -+ if (internals->slaves[i].link_status_poll_enabled) -+ internals->link_status_polling_enabled = 1; -+ } -+ /* start polling if needed */ -+ if (internals->link_status_polling_enabled) { -+ rte_eal_alarm_set( -+ internals->link_status_polling_interval_ms * 1000, -+ bond_ethdev_slave_link_status_change_monitor, -+ (void *)&rte_eth_devices[internals->port_id]); - } - - if (internals->user_defined_primary_port) -@@ -1615,6 +1620,8 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) - - internals->active_slave_count = 0; - internals->link_status_polling_enabled = 0; -+ for (i = 0; i < internals->slave_count; i++) -+ internals->slaves[i].last_link_status = 0; - - eth_dev->data->dev_link.link_status = 0; - eth_dev->data->dev_started = 0; --- -1.9.1 - diff --git a/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch b/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch deleted file mode 100644 index 53f3eaa7..00000000 --- a/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch +++ /dev/null @@ -1,27 +0,0 @@ -commit 7182d3e7d17722d088322695fc09f0d3bb7f1eab -Author: John Daley -Date: Wed Mar 30 11:07:31 2016 -0700 - - enic: expose Rx missed packets counter - - Update the 'imissed' counter with the number of packets dropped - by the NIC. 
- - Fixes: fefed3d1e62c ("enic: new driver") - - Signed-off-by: John Daley - Reviewed-by: Nelson Escobar - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 2f79cf0..e3da51d 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -246,6 +246,8 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - r_stats->ierrors = stats->rx.rx_errors; - r_stats->oerrors = stats->tx.tx_errors; - -+ r_stats->imissed = stats->rx.rx_drop; -+ - r_stats->imcasts = stats->rx.rx_multicast_frames_ok; - r_stats->rx_nombuf = stats->rx.rx_no_bufs; - } diff --git a/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch b/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch deleted file mode 100644 index 81e7bf3d..00000000 --- a/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 3433c7828ec909fccb768636ee21867030da14c9 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 26 Apr 2016 13:30:50 -0700 -Subject: [PATCH 1/3] enic: fix 'imissed' to count drops due to lack of RX - buffers - -Fixes: 7182d3e7d177 ("enic: expose Rx missed packets counter") -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 60fe765..be4e9e5 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -243,10 +243,10 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - r_stats->ibytes = stats->rx.rx_bytes_ok; - r_stats->obytes = stats->tx.tx_bytes_ok; - -- r_stats->ierrors = stats->rx.rx_errors; -+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; - r_stats->oerrors = stats->tx.tx_errors; - -- r_stats->imissed = stats->rx.rx_drop; -+ r_stats->imissed = stats->rx.rx_no_bufs; - - r_stats->rx_nombuf = stats->rx.rx_no_bufs; - } --- -2.7.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch b/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch deleted file mode 100644 index 69ca3f31..00000000 --- a/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 454eb71eca1912e32a509c738a99a340cc2488cf Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Mon, 25 Apr 2016 16:24:53 -0700 -Subject: [PATCH 2/3] enic: fix misalignment of Rx mbuf data - -Data DMA used m->data_off of uninitialized mbufs instead of -RTE_PKTMBUF_HEADROOM, potentially causing Rx data to be -placed at the wrong alignment in the mbuf. 
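The rule this fix establishes, as a self-contained sketch (the struct and function are illustrative; headroom stands for RTE_PKTMBUF_HEADROOM, and buf_physaddr/buf_len mirror the DPDK 2.2 rte_mbuf fields used in the patch): the Rx DMA address and length for a freshly pool-allocated buffer are derived from the fixed headroom, never from a data_off field that raw allocation leaves uninitialized.

#include <stdint.h>

struct rx_slot {
	uint64_t dma_addr;	/* where the NIC writes packet data */
	uint16_t dma_len;	/* usable bytes after the headroom */
};

static inline struct rx_slot
rx_slot_for_buffer(uint64_t buf_physaddr, uint16_t buf_len, uint16_t headroom)
{
	struct rx_slot s;

	s.dma_addr = buf_physaddr + headroom;		/* skip reserved headroom */
	s.dma_len = (uint16_t)(buf_len - headroom);	/* not buf_len, not data_off */
	return s;
}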
- -Fixes: 947d860c821f ("enic: improve Rx performance") -Signed-off-by: John Daley ---- - drivers/net/enic/enic_main.c | 5 +++-- - drivers/net/enic/enic_rx.c | 6 ++++-- - 2 files changed, 7 insertions(+), 4 deletions(-) - -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index be4e9e5..646d87f 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -354,10 +354,11 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) - return -ENOMEM; - } - -- dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off); -+ dma_addr = (dma_addr_t)(mb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); - - rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, -- mb->buf_len); -+ mb->buf_len - RTE_PKTMBUF_HEADROOM); - rq->mbuf_ring[i] = mb; - } - -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -index 232987a..39bb55c 100644 ---- a/drivers/net/enic/enic_rx.c -+++ b/drivers/net/enic/enic_rx.c -@@ -314,9 +314,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - + rx_id); - - /* Push descriptor for newly allocated mbuf */ -- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off); -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); - rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len); -+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -+ - RTE_PKTMBUF_HEADROOM); - - /* Fill in the rest of the mbuf */ - rxmb->data_off = RTE_PKTMBUF_HEADROOM; --- -2.7.0 - diff --git a/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch b/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch deleted file mode 100644 index 4858b8f5..00000000 --- a/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch +++ /dev/null @@ -1,1844 +0,0 @@ -From ca6bbb723880e91d006de6cc485259da988859aa Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 5 Apr 2016 15:19:06 -0700 -Subject: [PATCH 3/3] enic: Optimization of Tx path to reduce Host CPU - overhead, cleanup - -Optimizations and cleanup: -- flatten packet send path -- flatten mbuf free path -- disable CQ entry writing and use CQ messages instead -- use rte_mempool_put_bulk() to bulk return freed mbufs -- remove unnecessary fields vnic_bufs struct, use contiguous array of cache - aligned divisible elements. No next pointers. -- use local variables inside per packet loop instead of fields in structs. 
-- factor book keeping out of the per packet tx loop where possible - (removed several conditionals) -- put Tx and Rx code in 1 file (enic_rxtx.c) - -Reviewed-by: Nelson Escobar -Signed-off-by: John Daley ---- - drivers/net/enic/Makefile | 2 +- - drivers/net/enic/base/enic_vnic_wq.h | 79 ------ - drivers/net/enic/base/vnic_cq.h | 37 +-- - drivers/net/enic/base/vnic_rq.h | 2 +- - drivers/net/enic/base/vnic_wq.c | 89 +++--- - drivers/net/enic/base/vnic_wq.h | 113 +------- - drivers/net/enic/enic.h | 27 +- - drivers/net/enic/enic_ethdev.c | 67 +---- - drivers/net/enic/enic_main.c | 132 +++------ - drivers/net/enic/enic_res.h | 81 +----- - drivers/net/enic/enic_rx.c | 361 ------------------------- - drivers/net/enic/enic_rxtx.c | 505 +++++++++++++++++++++++++++++++++++ - 12 files changed, 635 insertions(+), 860 deletions(-) - delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h - delete mode 100644 drivers/net/enic/enic_rx.c - create mode 100644 drivers/net/enic/enic_rxtx.c - -diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile -index f316274..3926b79 100644 ---- a/drivers/net/enic/Makefile -+++ b/drivers/net/enic/Makefile -@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src - # - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c --SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c -+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c - SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c -diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h -deleted file mode 100644 -index b019109..0000000 ---- a/drivers/net/enic/base/enic_vnic_wq.h -+++ /dev/null -@@ -1,79 +0,0 @@ --/* -- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved. -- * Copyright 2007 Nuova Systems, Inc. All rights reserved. -- * -- * Copyright (c) 2015, Cisco Systems, Inc. -- * All rights reserved. -- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * -- * 1. Redistributions of source code must retain the above copyright -- * notice, this list of conditions and the following disclaimer. -- * -- * 2. Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in -- * the documentation and/or other materials provided with the -- * distribution. -- * -- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -- * POSSIBILITY OF SUCH DAMAGE. 
-- * -- */ -- --#ifndef _ENIC_VNIC_WQ_H_ --#define _ENIC_VNIC_WQ_H_ -- --#include "vnic_dev.h" --#include "vnic_cq.h" -- --static inline void enic_vnic_post_wq_index(struct vnic_wq *wq) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. -- */ -- wmb(); -- iowrite32(buf->index, &wq->ctrl->posted_index); --} -- --static inline void enic_vnic_post_wq(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, -- unsigned int len, int sop, -- uint8_t desc_skip_cnt, uint8_t cq_entry, -- uint8_t compressed_send, uint64_t wrid) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- buf->sop = sop; -- buf->cq_entry = cq_entry; -- buf->compressed_send = compressed_send; -- buf->desc_skip_cnt = desc_skip_cnt; -- buf->os_buf = os_buf; -- buf->dma_addr = dma_addr; -- buf->len = len; -- buf->wr_id = wrid; -- -- buf = buf->next; -- wq->ring.desc_avail -= desc_skip_cnt; -- wq->to_use = buf; -- -- if (cq_entry) -- enic_vnic_post_wq_index(wq); --} -- --#endif /* _ENIC_VNIC_WQ_H_ */ -diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h -index 922391b..ffc1aaa 100644 ---- a/drivers/net/enic/base/vnic_cq.h -+++ b/drivers/net/enic/base/vnic_cq.h -@@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq, - u8 type, u16 q_number, u16 completed_index, void *opaque), - void *opaque) - { -- struct cq_desc *cq_desc; -+ struct cq_desc *cq_desc, *cq_desc_last; - unsigned int work_done = 0; - u16 q_number, completed_index; -- u8 type, color; -- struct rte_mbuf **rx_pkts = opaque; -- unsigned int ret; -+ u8 type, color, type_color; - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); -- cq_desc_dec(cq_desc, &type, &color, -- &q_number, &completed_index); -+ -+ type_color = cq_desc->type_color; -+ color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; -+ if (color == cq->last_color) -+ return 0; - - while (color != cq->last_color) { -- if (opaque) -- opaque = (void *)&(rx_pkts[work_done]); -+ cq_desc_last = cq_desc; - -- ret = (*q_service)(cq->vdev, cq_desc, type, -- q_number, completed_index, opaque); - cq->to_clean++; - if (cq->to_clean == cq->ring.desc_count) { - cq->to_clean = 0; - cq->last_color = cq->last_color ? 
0 : 1; - } - -+ work_done++; -+ if (work_done >= work_to_do) -+ break; -+ - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); -- cq_desc_dec(cq_desc, &type, &color, -- &q_number, &completed_index); - -- if (ret) -- work_done++; -- if (work_done >= work_to_do) -- break; -+ type_color = cq_desc->type_color; -+ color = (type_color >> CQ_DESC_COLOR_SHIFT) -+ & CQ_DESC_COLOR_MASK; -+ - } - -+ cq_desc_dec(cq_desc_last, &type, &color, -+ &q_number, &completed_index); -+ -+ (*q_service)(cq->vdev, cq_desc, type, -+ q_number, completed_index, opaque); - return work_done; - } - -diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h -index e083ccc..424415c 100644 ---- a/drivers/net/enic/base/vnic_rq.h -+++ b/drivers/net/enic/base/vnic_rq.h -@@ -74,7 +74,7 @@ struct vnic_rq { - struct vnic_dev_ring ring; - struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ - unsigned int mbuf_next_idx; /* next mb to consume */ -- void *os_buf_head; -+ void *mb_head; - unsigned int pkts_outstanding; - uint16_t rx_nb_hold; - uint16_t rx_free_thresh; -diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c -index a3ef417..ccbbd61 100644 ---- a/drivers/net/enic/base/vnic_wq.c -+++ b/drivers/net/enic/base/vnic_wq.c -@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, - - static int vnic_wq_alloc_bufs(struct vnic_wq *wq) - { -- struct vnic_wq_buf *buf; -- unsigned int i, j, count = wq->ring.desc_count; -- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); -- -- for (i = 0; i < blks; i++) { -- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); -- if (!wq->bufs[i]) -- return -ENOMEM; -- } -- -- for (i = 0; i < blks; i++) { -- buf = wq->bufs[i]; -- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { -- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; -- buf->desc = (u8 *)wq->ring.descs + -- wq->ring.desc_size * buf->index; -- if (buf->index + 1 == count) { -- buf->next = wq->bufs[0]; -- break; -- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { -- buf->next = wq->bufs[i + 1]; -- } else { -- buf->next = buf + 1; -- buf++; -- } -- } -- } -- -- wq->to_use = wq->to_clean = wq->bufs[0]; -- -+ unsigned int count = wq->ring.desc_count; -+ /* Allocate the mbuf ring */ -+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs", -+ sizeof(struct vnic_wq_buf) * count, -+ RTE_CACHE_LINE_SIZE, wq->socket_id); -+ wq->head_idx = 0; -+ wq->tail_idx = 0; -+ if (wq->bufs == NULL) -+ return -ENOMEM; - return 0; - } - - void vnic_wq_free(struct vnic_wq *wq) - { - struct vnic_dev *vdev; -- unsigned int i; - - vdev = wq->vdev; - - vnic_dev_free_desc_ring(vdev, &wq->ring); - -- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { -- if (wq->bufs[i]) { -- kfree(wq->bufs[i]); -- wq->bufs[i] = NULL; -- } -- } -- -+ rte_free(wq->bufs); - wq->ctrl = NULL; - } - --int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, -- unsigned int desc_size) --{ -- int mem_size = 0; -- -- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size); -- -- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) * -- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count); -- -- return mem_size; --} -- - - int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, - unsigned int desc_count, unsigned int desc_size) -@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, - iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); - iowrite32(0, 
&wq->ctrl->error_status); - -- wq->to_use = wq->to_clean = -- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] -- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; -+ wq->head_idx = fetch_index; -+ wq->tail_idx = wq->head_idx; - } - - void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, -@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, - vnic_wq_init_start(wq, cq_index, 0, 0, - error_interrupt_enable, - error_interrupt_offset); -+ wq->last_completed_index = 0; - } - - void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error) -@@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq) - return -ETIMEDOUT; - } - -+static inline uint32_t -+buf_idx_incr(uint32_t n_descriptors, uint32_t idx) -+{ -+ idx++; -+ if (unlikely(idx == n_descriptors)) -+ idx = 0; -+ return idx; -+} -+ - void vnic_wq_clean(struct vnic_wq *wq, -- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) -+ void (*buf_clean)(struct vnic_wq_buf *buf)) - { - struct vnic_wq_buf *buf; -+ unsigned int to_clean = wq->tail_idx; - -- buf = wq->to_clean; -+ buf = &wq->bufs[to_clean]; - - while (vnic_wq_desc_used(wq) > 0) { - -- (*buf_clean)(wq, buf); -+ (*buf_clean)(buf); -+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean); - -- buf = wq->to_clean = buf->next; -+ buf = &wq->bufs[to_clean]; - wq->ring.desc_avail++; - } - -- wq->to_use = wq->to_clean = wq->bufs[0]; -+ wq->head_idx = 0; -+ wq->tail_idx = 0; - - iowrite32(0, &wq->ctrl->fetch_index); - iowrite32(0, &wq->ctrl->posted_index); -diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h -index c23de62..37c3ff9 100644 ---- a/drivers/net/enic/base/vnic_wq.h -+++ b/drivers/net/enic/base/vnic_wq.h -@@ -64,42 +64,23 @@ struct vnic_wq_ctrl { - u32 pad9; - }; - -+/* 16 bytes */ - struct vnic_wq_buf { -- struct vnic_wq_buf *next; -- dma_addr_t dma_addr; -- void *os_buf; -- unsigned int len; -- unsigned int index; -- int sop; -- void *desc; -- uint64_t wr_id; /* Cookie */ -- uint8_t cq_entry; /* Gets completion event from hw */ -- uint8_t desc_skip_cnt; /* Num descs to occupy */ -- uint8_t compressed_send; /* Both hdr and payload in one desc */ -+ struct rte_mempool *pool; -+ void *mb; - }; - --/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ --#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 --#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 --#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ -- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? 
\ -- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) --#define VNIC_WQ_BUF_BLK_SZ(entries) \ -- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) --#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ -- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) --#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) -- - struct vnic_wq { - unsigned int index; - struct vnic_dev *vdev; - struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ - struct vnic_dev_ring ring; -- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; -- struct vnic_wq_buf *to_use; -- struct vnic_wq_buf *to_clean; -- unsigned int pkts_outstanding; -+ struct vnic_wq_buf *bufs; -+ unsigned int head_idx; -+ unsigned int tail_idx; - unsigned int socket_id; -+ const struct rte_memzone *cqmsg_rz; -+ uint16_t last_completed_index; - }; - - static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) -@@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) - return wq->ring.desc_count - wq->ring.desc_avail - 1; - } - --static inline void *vnic_wq_next_desc(struct vnic_wq *wq) --{ -- return wq->to_use->desc; --} -- - #define PI_LOG2_CACHE_LINE_SIZE 5 - #define PI_INDEX_BITS 12 - #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) -@@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len, - PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); - } - --static inline void vnic_wq_post(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, -- unsigned int len, int sop, int eop, -- uint8_t desc_skip_cnt, uint8_t cq_entry, -- uint8_t compressed_send, uint64_t wrid) --{ -- struct vnic_wq_buf *buf = wq->to_use; -- -- buf->sop = sop; -- buf->cq_entry = cq_entry; -- buf->compressed_send = compressed_send; -- buf->desc_skip_cnt = desc_skip_cnt; -- buf->os_buf = os_buf; -- buf->dma_addr = dma_addr; -- buf->len = len; -- buf->wr_id = wrid; -- -- buf = buf->next; -- if (eop) { --#ifdef DO_PREFETCH -- uint64_t wr = vnic_cached_posted_index(dma_addr, len, -- buf->index); --#endif -- /* Adding write memory barrier prevents compiler and/or CPU -- * reordering, thus avoiding descriptor posting before -- * descriptor is initialized. Otherwise, hardware can read -- * stale descriptor fields. -- */ -- wmb(); --#ifdef DO_PREFETCH -- /* Intel chipsets seem to limit the rate of PIOs that we can -- * push on the bus. Thus, it is very important to do a single -- * 64 bit write here. With two 32-bit writes, my maximum -- * pkt/sec rate was cut almost in half. 
-AJF -- */ -- iowrite64((uint64_t)wr, &wq->ctrl->posted_index); --#else -- iowrite32(buf->index, &wq->ctrl->posted_index); --#endif -- } -- wq->to_use = buf; -- -- wq->ring.desc_avail -= desc_skip_cnt; --} -- --static inline void vnic_wq_service(struct vnic_wq *wq, -- struct cq_desc *cq_desc, u16 completed_index, -- void (*buf_service)(struct vnic_wq *wq, -- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), -- void *opaque) --{ -- struct vnic_wq_buf *buf; -- -- buf = wq->to_clean; -- while (1) { -- -- (*buf_service)(wq, cq_desc, buf, opaque); -- -- wq->ring.desc_avail++; -- -- wq->to_clean = buf->next; -- -- if (buf->index == completed_index) -- break; -- -- buf = wq->to_clean; -- } --} -- - void vnic_wq_free(struct vnic_wq *wq); - int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, - unsigned int desc_count, unsigned int desc_size); -@@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq); - void vnic_wq_enable(struct vnic_wq *wq); - int vnic_wq_disable(struct vnic_wq *wq); - void vnic_wq_clean(struct vnic_wq *wq, -- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); --int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, -- unsigned int desc_size); -+ void (*buf_clean)(struct vnic_wq_buf *buf)); - - #endif /* _VNIC_WQ_H_ */ -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 8c914f5..43b82a6 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) - return (struct enic *)eth_dev->data->dev_private; - } - -+static inline uint32_t -+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -+{ -+ uint32_t d = i0 + i1; -+ d -= (d >= n_descriptors) ? n_descriptors : 0; -+ return d; -+} -+ -+static inline uint32_t -+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -+{ -+ int32_t d = i1 - i0; -+ return (uint32_t)((d < 0) ? 
((int32_t)n_descriptors + d) : d); -+} -+ -+static inline uint32_t -+enic_ring_incr(uint32_t n_descriptors, uint32_t idx) -+{ -+ idx++; -+ if (unlikely(idx == n_descriptors)) -+ idx = 0; -+ return idx; -+} -+ - #define RTE_LIBRTE_ENIC_ASSERT_ENABLE - #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE - #define ASSERT(x) do { \ -@@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic); - extern void enic_clsf_destroy(struct enic *enic); - uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -- -+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts); - #endif /* _ENIC_H_ */ -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 6bea940..697ff82 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui - enic_del_mac_address(enic); - } - -- --static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -- uint16_t nb_pkts) --{ -- uint16_t index; -- unsigned int frags; -- unsigned int pkt_len; -- unsigned int seg_len; -- unsigned int inc_len; -- unsigned int nb_segs; -- struct rte_mbuf *tx_pkt, *next_tx_pkt; -- struct vnic_wq *wq = (struct vnic_wq *)tx_queue; -- struct enic *enic = vnic_dev_priv(wq->vdev); -- unsigned short vlan_id; -- unsigned short ol_flags; -- uint8_t last_seg, eop; -- unsigned int host_tx_descs = 0; -- -- for (index = 0; index < nb_pkts; index++) { -- tx_pkt = *tx_pkts++; -- inc_len = 0; -- nb_segs = tx_pkt->nb_segs; -- if (nb_segs > vnic_wq_desc_avail(wq)) { -- if (index > 0) -- enic_post_wq_index(wq); -- -- /* wq cleanup and try again */ -- if (!enic_cleanup_wq(enic, wq) || -- (nb_segs > vnic_wq_desc_avail(wq))) { -- return index; -- } -- } -- -- pkt_len = tx_pkt->pkt_len; -- vlan_id = tx_pkt->vlan_tci; -- ol_flags = tx_pkt->ol_flags; -- for (frags = 0; inc_len < pkt_len; frags++) { -- if (!tx_pkt) -- break; -- next_tx_pkt = tx_pkt->next; -- seg_len = tx_pkt->data_len; -- inc_len += seg_len; -- -- host_tx_descs++; -- last_seg = 0; -- eop = 0; -- if ((pkt_len == inc_len) || !next_tx_pkt) { -- eop = 1; -- /* post if last packet in batch or > thresh */ -- if ((index == (nb_pkts - 1)) || -- (host_tx_descs > ENIC_TX_POST_THRESH)) { -- last_seg = 1; -- host_tx_descs = 0; -- } -- } -- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len, -- !frags, eop, last_seg, ol_flags, vlan_id); -- tx_pkt = next_tx_pkt; -- } -- } -- -- enic_cleanup_wq(enic, wq); -- return index; --} -- - static const struct eth_dev_ops enicpmd_eth_dev_ops = { - .dev_configure = enicpmd_dev_configure, - .dev_start = enicpmd_dev_start, -@@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) - enic->rte_dev = eth_dev; - eth_dev->dev_ops = &enicpmd_eth_dev_ops; - eth_dev->rx_pkt_burst = &enic_recv_pkts; -- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts; -+ eth_dev->tx_pkt_burst = &enic_xmit_pkts; - - pdev = eth_dev->pci_dev; - rte_eth_copy_pci_info(eth_dev, pdev); -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index 646d87f..ba73604 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -40,11 +40,11 @@ - #include - - #include --#include - #include - #include - #include - #include -+#include - - #include "enic_compat.h" - #include "enic.h" -@@ -58,7 +58,6 @@ - #include "vnic_cq.h" - #include "vnic_intr.h" - #include "vnic_nic.h" --#include "enic_vnic_wq.h" - - static inline struct rte_mbuf * - 
rte_rxmbuf_alloc(struct rte_mempool *mp) -@@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) - } - } - -- - void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) - { - vnic_set_hdr_split_size(enic->vdev, split_hdr_size); - } - --static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf) -+static void enic_free_wq_buf(struct vnic_wq_buf *buf) - { -- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf; -+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb; - - rte_mempool_put(mbuf->pool, mbuf); -- buf->os_buf = NULL; --} -- --static void enic_wq_free_buf(struct vnic_wq *wq, -- __rte_unused struct cq_desc *cq_desc, -- struct vnic_wq_buf *buf, -- __rte_unused void *opaque) --{ -- enic_free_wq_buf(wq, buf); --} -- --static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, -- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque) --{ -- struct enic *enic = vnic_dev_priv(vdev); -- -- vnic_wq_service(&enic->wq[q_number], cq_desc, -- completed_index, enic_wq_free_buf, -- opaque); -- -- return 0; -+ buf->mb = NULL; - } - - static void enic_log_q_error(struct enic *enic) -@@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic) - } - } - --unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq) --{ -- unsigned int cq = enic_cq_wq(enic, wq->index); -- -- /* Return the work done */ -- return vnic_cq_service(&enic->cq[cq], -- -1 /*wq_work_to_do*/, enic_wq_service, NULL); --} -- --void enic_post_wq_index(struct vnic_wq *wq) --{ -- enic_vnic_post_wq_index(wq); --} -- --void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, -- struct rte_mbuf *tx_pkt, unsigned short len, -- uint8_t sop, uint8_t eop, uint8_t cq_entry, -- uint16_t ol_flags, uint16_t vlan_tag) --{ -- struct wq_enet_desc *desc = vnic_wq_next_desc(wq); -- uint16_t mss = 0; -- uint8_t vlan_tag_insert = 0; -- uint64_t bus_addr = (dma_addr_t) -- (tx_pkt->buf_physaddr + tx_pkt->data_off); -- -- if (sop) { -- if (ol_flags & PKT_TX_VLAN_PKT) -- vlan_tag_insert = 1; -- -- if (enic->hw_ip_checksum) { -- if (ol_flags & PKT_TX_IP_CKSUM) -- mss |= ENIC_CALC_IP_CKSUM; -- -- if (ol_flags & PKT_TX_TCP_UDP_CKSUM) -- mss |= ENIC_CALC_TCP_UDP_CKSUM; -- } -- } -- -- wq_enet_desc_enc(desc, -- bus_addr, -- len, -- mss, -- 0 /* header_length */, -- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */, -- eop, -- cq_entry, -- 0 /* fcoe_encap */, -- vlan_tag_insert, -- vlan_tag, -- 0 /* loopback */); -- -- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len, -- sop, -- 1 /*desc_skip_cnt*/, -- cq_entry, -- 0 /*compressed send*/, -- 0 /*wrid*/); --} - - void enic_dev_stats_clear(struct enic *enic) - { -@@ -297,12 +217,28 @@ void enic_init_vnic_resources(struct enic *enic) - unsigned int error_interrupt_enable = 1; - unsigned int error_interrupt_offset = 0; - unsigned int index = 0; -+ unsigned int cq_idx; -+ -+ vnic_dev_stats_clear(enic->vdev); - - for (index = 0; index < enic->rq_count; index++) { - vnic_rq_init(&enic->rq[index], - enic_cq_rq(enic, index), - error_interrupt_enable, - error_interrupt_offset); -+ -+ cq_idx = enic_cq_rq(enic, index); -+ vnic_cq_init(&enic->cq[cq_idx], -+ 0 /* flow_control_enable */, -+ 1 /* color_enable */, -+ 0 /* cq_head */, -+ 0 /* cq_tail */, -+ 1 /* cq_tail_color */, -+ 0 /* interrupt_enable */, -+ 1 /* cq_entry_enable */, -+ 0 /* cq_message_enable */, -+ 0 /* interrupt offset */, -+ 0 /* cq_message_addr */); - } - - for (index = 0; index < enic->wq_count; index++) { -@@ -310,22 +246,19 @@ void 
enic_init_vnic_resources(struct enic *enic) - enic_cq_wq(enic, index), - error_interrupt_enable, - error_interrupt_offset); -- } -- -- vnic_dev_stats_clear(enic->vdev); - -- for (index = 0; index < enic->cq_count; index++) { -- vnic_cq_init(&enic->cq[index], -+ cq_idx = enic_cq_wq(enic, index); -+ vnic_cq_init(&enic->cq[cq_idx], - 0 /* flow_control_enable */, - 1 /* color_enable */, - 0 /* cq_head */, - 0 /* cq_tail */, - 1 /* cq_tail_color */, - 0 /* interrupt_enable */, -- 1 /* cq_entry_enable */, -- 0 /* cq_message_enable */, -+ 0 /* cq_entry_enable */, -+ 1 /* cq_message_enable */, - 0 /* interrupt offset */, -- 0 /* cq_message_addr */); -+ (u64)enic->wq[index].cqmsg_rz->phys_addr); - } - - vnic_intr_init(&enic->intr, -@@ -569,6 +502,7 @@ void enic_free_wq(void *txq) - struct vnic_wq *wq = (struct vnic_wq *)txq; - struct enic *enic = vnic_dev_priv(wq->vdev); - -+ rte_memzone_free(wq->cqmsg_rz); - vnic_wq_free(wq); - vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); - } -@@ -579,6 +513,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - int err; - struct vnic_wq *wq = &enic->wq[queue_idx]; - unsigned int cq_index = enic_cq_wq(enic, queue_idx); -+ char name[NAME_MAX]; -+ static int instance; - - wq->socket_id = socket_id; - if (nb_desc) { -@@ -614,6 +550,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - dev_err(enic, "error in allocation of cq for wq\n"); - } - -+ /* setup up CQ message */ -+ snprintf((char *)name, sizeof(name), -+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx, -+ instance++); -+ -+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, -+ sizeof(uint32_t), -+ SOCKET_ID_ANY, 0, -+ ENIC_ALIGN); -+ if (!wq->cqmsg_rz) -+ return -ENOMEM; -+ - return err; - } - -diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h -index 00fa71d..3e1bdf5 100644 ---- a/drivers/net/enic/enic_res.h -+++ b/drivers/net/enic/enic_res.h -@@ -53,89 +53,10 @@ - - #define ENIC_NON_TSO_MAX_DESC 16 - #define ENIC_DEFAULT_RX_FREE_THRESH 32 --#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2) -+#define ENIC_TX_XMIT_MAX 64 - - #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0) - --static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int mss_or_csum_offset, unsigned int hdr_len, -- int vlan_tag_insert, unsigned int vlan_tag, -- int offload_mode, int cq_entry, int sop, int eop, int loopback) --{ -- struct wq_enet_desc *desc = vnic_wq_next_desc(wq); -- u8 desc_skip_cnt = 1; -- u8 compressed_send = 0; -- u64 wrid = 0; -- -- wq_enet_desc_enc(desc, -- (u64)dma_addr | VNIC_PADDR_TARGET, -- (u16)len, -- (u16)mss_or_csum_offset, -- (u16)hdr_len, (u8)offload_mode, -- (u8)eop, (u8)cq_entry, -- 0, /* fcoe_encap */ -- (u8)vlan_tag_insert, -- (u16)vlan_tag, -- (u8)loopback); -- -- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, -- (u8)cq_entry, compressed_send, wrid); --} -- --static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- 0, 0, 0, 0, 0, -- eop, 0 /* !SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, -- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- 0, 0, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- int ip_csum, int tcpudp_csum, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), -- 0, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int csum_offset, unsigned int hdr_len, -- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- csum_offset, hdr_len, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_CSUM_L4, -- eop, 1 /* SOP */, eop, loopback); --} -- --static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, -- void *os_buf, dma_addr_t dma_addr, unsigned int len, -- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, -- unsigned int vlan_tag, int eop, int loopback) --{ -- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, -- mss, hdr_len, vlan_tag_insert, vlan_tag, -- WQ_ENET_OFFLOAD_MODE_TSO, -- eop, 1 /* SOP */, eop, loopback); --} -- - struct enic; - - int enic_get_vnic_config(struct enic *); -diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c -deleted file mode 100644 -index 39bb55c..0000000 ---- a/drivers/net/enic/enic_rx.c -+++ /dev/null -@@ -1,361 +0,0 @@ --/* -- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. -- * Copyright 2007 Nuova Systems, Inc. All rights reserved. -- * -- * Copyright (c) 2014, Cisco Systems, Inc. -- * All rights reserved. -- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * -- * 1. Redistributions of source code must retain the above copyright -- * notice, this list of conditions and the following disclaimer. -- * -- * 2. 
Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in -- * the documentation and/or other materials provided with the -- * distribution. -- * -- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -- * POSSIBILITY OF SUCH DAMAGE. -- * -- */ -- --#include --#include --#include -- --#include "enic_compat.h" --#include "rq_enet_desc.h" --#include "enic.h" -- --#define RTE_PMD_USE_PREFETCH -- --#ifdef RTE_PMD_USE_PREFETCH --/* -- * Prefetch a cache line into all cache levels. -- */ --#define rte_enic_prefetch(p) rte_prefetch0(p) --#else --#define rte_enic_prefetch(p) do {} while (0) --#endif -- --#ifdef RTE_PMD_PACKET_PREFETCH --#define rte_packet_prefetch(p) rte_prefetch1(p) --#else --#define rte_packet_prefetch(p) do {} while (0) --#endif -- --static inline struct rte_mbuf * --rte_rxmbuf_alloc(struct rte_mempool *mp) --{ -- struct rte_mbuf *m; -- -- m = __rte_mbuf_raw_alloc(mp); -- __rte_mbuf_sanity_check_raw(m, 0); -- return m; --} -- --static inline uint16_t --enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) --{ -- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; --} -- --static inline uint16_t --enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) --{ -- return(le16_to_cpu(crd->bytes_written_flags) & -- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); --} -- --static inline uint8_t --enic_cq_rx_desc_packet_error(uint16_t bwflags) --{ -- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == -- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); --} -- --static inline uint8_t --enic_cq_rx_desc_eop(uint16_t ciflags) --{ -- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) -- == CQ_ENET_RQ_DESC_FLAGS_EOP; --} -- --static inline uint8_t --enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) --{ -- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & -- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == -- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); --} -- --static inline uint8_t --enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) --{ -- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == -- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); --} -- --static inline uint8_t --enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) --{ -- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == -- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); --} -- --static inline uint8_t --enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) --{ -- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> -- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); --} -- --static inline uint32_t --enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) --{ -- return le32_to_cpu(cqrd->rss_hash); --} -- --static inline uint16_t --enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) --{ -- return le16_to_cpu(cqrd->vlan); 
--} -- --static inline uint16_t --enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- return le16_to_cpu(cqrd->bytes_written_flags) & -- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; --} -- --static inline uint8_t --enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint16_t bwflags; -- int ret = 0; -- uint64_t pkt_err_flags = 0; -- -- bwflags = enic_cq_rx_desc_bwflags(cqrd); -- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { -- pkt_err_flags = PKT_RX_MAC_ERR; -- ret = 1; -- } -- *pkt_err_flags_out = pkt_err_flags; -- return ret; --} -- --/* -- * Lookup table to translate RX CQ flags to mbuf flags. -- */ --static inline uint32_t --enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint8_t cqrd_flags = cqrd->flags; -- static const uint32_t cq_type_table[128] __rte_cache_aligned = { -- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, -- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_UDP, -- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_TCP, -- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -- | RTE_PTYPE_L4_FRAG, -- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, -- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_UDP, -- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_TCP, -- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -- | RTE_PTYPE_L4_FRAG, -- /* All others reserved */ -- }; -- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT -- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 -- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; -- return cq_type_table[cqrd_flags]; --} -- --static inline void --enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) --{ -- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -- uint16_t ciflags, bwflags, pkt_flags = 0; -- ciflags = enic_cq_rx_desc_ciflags(cqrd); -- bwflags = enic_cq_rx_desc_bwflags(cqrd); -- -- mbuf->ol_flags = 0; -- -- /* flags are meaningless if !EOP */ -- if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -- goto mbuf_flags_done; -- -- /* VLAN stripping */ -- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -- pkt_flags |= PKT_RX_VLAN_PKT; -- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); -- } else { -- mbuf->vlan_tci = 0; -- } -- -- /* RSS flag */ -- if (enic_cq_rx_desc_rss_type(cqrd)) { -- pkt_flags |= PKT_RX_RSS_HASH; -- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); -- } -- -- /* checksum flags */ -- if (!enic_cq_rx_desc_csum_not_calc(cqrd) && -- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) -- pkt_flags |= PKT_RX_IP_CKSUM_BAD; -- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) -- pkt_flags |= PKT_RX_L4_CKSUM_BAD; -- } -- } -- -- mbuf_flags_done: -- mbuf->ol_flags = pkt_flags; --} -- --static inline uint32_t --enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) --{ -- uint32_t d = i0 + i1; -- ASSERT(i0 < n_descriptors); -- ASSERT(i1 < n_descriptors); -- d -= (d >= n_descriptors) ? 
n_descriptors : 0; -- return d; --} -- -- --uint16_t --enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -- uint16_t nb_pkts) --{ -- struct vnic_rq *rq = rx_queue; -- struct enic *enic = vnic_dev_priv(rq->vdev); -- unsigned int rx_id; -- struct rte_mbuf *nmb, *rxmb; -- uint16_t nb_rx = 0; -- uint16_t nb_hold; -- struct vnic_cq *cq; -- volatile struct cq_desc *cqd_ptr; -- uint8_t color; -- -- cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -- -- nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -- -- while (nb_rx < nb_pkts) { -- volatile struct rq_enet_desc *rqd_ptr; -- dma_addr_t dma_addr; -- struct cq_desc cqd; -- uint64_t ol_err_flags; -- uint8_t packet_error; -- -- /* Check for pkts available */ -- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -- & CQ_DESC_COLOR_MASK; -- if (color == cq->last_color) -- break; -- -- /* Get the cq descriptor and rq pointer */ -- cqd = *cqd_ptr; -- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -- -- /* allocate a new mbuf */ -- nmb = rte_rxmbuf_alloc(rq->mp); -- if (nmb == NULL) { -- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -- enic->port_id, (unsigned)rq->index); -- rte_eth_devices[enic->port_id]. -- data->rx_mbuf_alloc_failed++; -- break; -- } -- -- /* A packet error means descriptor and data are untrusted */ -- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -- -- /* Get the mbuf to return and replace with one just allocated */ -- rxmb = rq->mbuf_ring[rx_id]; -- rq->mbuf_ring[rx_id] = nmb; -- -- /* Increment cqd, rqd, mbuf_table index */ -- rx_id++; -- if (unlikely(rx_id == rq->ring.desc_count)) { -- rx_id = 0; -- cq->last_color = cq->last_color ? 
0 : 1; -- } -- -- /* Prefetch next mbuf & desc while processing current one */ -- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -- rte_enic_prefetch(cqd_ptr); -- rte_enic_prefetch(rq->mbuf_ring[rx_id]); -- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -- + rx_id); -- -- /* Push descriptor for newly allocated mbuf */ -- dma_addr = (dma_addr_t)(nmb->buf_physaddr -- + RTE_PKTMBUF_HEADROOM); -- rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -- - RTE_PKTMBUF_HEADROOM); -- -- /* Fill in the rest of the mbuf */ -- rxmb->data_off = RTE_PKTMBUF_HEADROOM; -- rxmb->nb_segs = 1; -- rxmb->next = NULL; -- rxmb->port = enic->port_id; -- if (!packet_error) { -- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -- enic_cq_rx_to_pkt_flags(&cqd, rxmb); -- } else { -- rxmb->pkt_len = 0; -- rxmb->packet_type = 0; -- rxmb->ol_flags = 0; -- } -- rxmb->data_len = rxmb->pkt_len; -- -- /* prefetch mbuf data for caller */ -- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -- RTE_PKTMBUF_HEADROOM)); -- -- /* store the mbuf address into the next entry of the array */ -- rx_pkts[nb_rx++] = rxmb; -- } -- -- nb_hold += nb_rx; -- cq->to_clean = rx_id; -- -- if (nb_hold > rq->rx_free_thresh) { -- rq->posted_index = enic_ring_add(rq->ring.desc_count, -- rq->posted_index, nb_hold); -- nb_hold = 0; -- rte_mb(); -- iowrite32(rq->posted_index, &rq->ctrl->posted_index); -- } -- -- rq->rx_nb_hold = nb_hold; -- -- return nb_rx; --} -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -new file mode 100644 -index 0000000..71ca34e ---- /dev/null -+++ b/drivers/net/enic/enic_rxtx.c -@@ -0,0 +1,505 @@ -+/* -+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved. -+ * Copyright 2007 Nuova Systems, Inc. All rights reserved. -+ * -+ * Copyright (c) 2016, Cisco Systems, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * 1. Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * -+ * 2. Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "enic_compat.h" -+#include "rq_enet_desc.h" -+#include "enic.h" -+ -+#define RTE_PMD_USE_PREFETCH -+ -+#ifdef RTE_PMD_USE_PREFETCH -+/* -+ * Prefetch a cache line into all cache levels. 
-+ */ -+#define rte_enic_prefetch(p) rte_prefetch0(p) -+#else -+#define rte_enic_prefetch(p) do {} while (0) -+#endif -+ -+#ifdef RTE_PMD_PACKET_PREFETCH -+#define rte_packet_prefetch(p) rte_prefetch1(p) -+#else -+#define rte_packet_prefetch(p) do {} while (0) -+#endif -+ -+static inline struct rte_mbuf * -+rte_rxmbuf_alloc(struct rte_mempool *mp) -+{ -+ struct rte_mbuf *m; -+ -+ m = __rte_mbuf_raw_alloc(mp); -+ __rte_mbuf_sanity_check_raw(m, 0); -+ return m; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) -+{ -+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) -+{ -+ return(le16_to_cpu(crd->bytes_written_flags) & -+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_packet_error(uint16_t bwflags) -+{ -+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == -+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_eop(uint16_t ciflags) -+{ -+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) -+ == CQ_ENET_RQ_DESC_FLAGS_EOP; -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == -+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) -+{ -+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == -+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); -+} -+ -+static inline uint8_t -+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) -+{ -+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> -+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); -+} -+ -+static inline uint32_t -+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) -+{ -+ return le32_to_cpu(cqrd->rss_hash); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) -+{ -+ return le16_to_cpu(cqrd->vlan); -+} -+ -+static inline uint16_t -+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ return le16_to_cpu(cqrd->bytes_written_flags) & -+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; -+} -+ -+static inline uint8_t -+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t bwflags; -+ int ret = 0; -+ uint64_t pkt_err_flags = 0; -+ -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { -+ pkt_err_flags = PKT_RX_MAC_ERR; -+ ret = 1; -+ } -+ *pkt_err_flags_out = pkt_err_flags; -+ return ret; -+} -+ -+/* -+ * Lookup table to translate RX CQ flags to mbuf flags. 
-+ */ -+static inline uint32_t -+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint8_t cqrd_flags = cqrd->flags; -+ static const uint32_t cq_type_table[128] __rte_cache_aligned = { -+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_UDP, -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_TCP, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 -+ | RTE_PTYPE_L4_FRAG, -+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, -+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_UDP, -+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_TCP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 -+ | RTE_PTYPE_L4_FRAG, -+ /* All others reserved */ -+ }; -+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT -+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 -+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; -+ return cq_type_table[cqrd_flags]; -+} -+ -+static inline void -+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) -+{ -+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; -+ uint16_t ciflags, bwflags, pkt_flags = 0; -+ ciflags = enic_cq_rx_desc_ciflags(cqrd); -+ bwflags = enic_cq_rx_desc_bwflags(cqrd); -+ -+ mbuf->ol_flags = 0; -+ -+ /* flags are meaningless if !EOP */ -+ if (unlikely(!enic_cq_rx_desc_eop(ciflags))) -+ goto mbuf_flags_done; -+ -+ /* VLAN stripping */ -+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -+ pkt_flags |= PKT_RX_VLAN_PKT; -+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); -+ } else { -+ mbuf->vlan_tci = 0; -+ } -+ -+ /* RSS flag */ -+ if (enic_cq_rx_desc_rss_type(cqrd)) { -+ pkt_flags |= PKT_RX_RSS_HASH; -+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); -+ } -+ -+ /* checksum flags */ -+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) && -+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_IP_CKSUM_BAD; -+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) -+ pkt_flags |= PKT_RX_L4_CKSUM_BAD; -+ } -+ } -+ -+ mbuf_flags_done: -+ mbuf->ol_flags = pkt_flags; -+} -+ -+uint16_t -+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts) -+{ -+ struct vnic_rq *rq = rx_queue; -+ struct enic *enic = vnic_dev_priv(rq->vdev); -+ unsigned int rx_id; -+ struct rte_mbuf *nmb, *rxmb; -+ uint16_t nb_rx = 0; -+ uint16_t nb_hold; -+ struct vnic_cq *cq; -+ volatile struct cq_desc *cqd_ptr; -+ uint8_t color; -+ -+ cq = &enic->cq[enic_cq_rq(enic, rq->index)]; -+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ -+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */ -+ -+ while (nb_rx < nb_pkts) { -+ volatile struct rq_enet_desc *rqd_ptr; -+ dma_addr_t dma_addr; -+ struct cq_desc cqd; -+ uint64_t ol_err_flags; -+ uint8_t packet_error; -+ -+ /* Check for pkts available */ -+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) -+ & CQ_DESC_COLOR_MASK; -+ if (color == cq->last_color) -+ break; -+ -+ /* Get the cq descriptor and rq pointer */ -+ cqd = *cqd_ptr; -+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; -+ -+ /* allocate a new mbuf */ -+ nmb = rte_rxmbuf_alloc(rq->mp); -+ if (nmb == NULL) { -+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", -+ enic->port_id, (unsigned)rq->index); -+ rte_eth_devices[enic->port_id]. 
-+ data->rx_mbuf_alloc_failed++; -+ break; -+ } -+ -+ /* A packet error means descriptor and data are untrusted */ -+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); -+ -+ /* Get the mbuf to return and replace with one just allocated */ -+ rxmb = rq->mbuf_ring[rx_id]; -+ rq->mbuf_ring[rx_id] = nmb; -+ -+ /* Increment cqd, rqd, mbuf_table index */ -+ rx_id++; -+ if (unlikely(rx_id == rq->ring.desc_count)) { -+ rx_id = 0; -+ cq->last_color = cq->last_color ? 0 : 1; -+ } -+ -+ /* Prefetch next mbuf & desc while processing current one */ -+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; -+ rte_enic_prefetch(cqd_ptr); -+ rte_enic_prefetch(rq->mbuf_ring[rx_id]); -+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) -+ + rx_id); -+ -+ /* Push descriptor for newly allocated mbuf */ -+ dma_addr = (dma_addr_t)(nmb->buf_physaddr -+ + RTE_PKTMBUF_HEADROOM); -+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr); -+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len -+ - RTE_PKTMBUF_HEADROOM); -+ -+ /* Fill in the rest of the mbuf */ -+ rxmb->data_off = RTE_PKTMBUF_HEADROOM; -+ rxmb->nb_segs = 1; -+ rxmb->next = NULL; -+ rxmb->port = enic->port_id; -+ if (!packet_error) { -+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); -+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); -+ enic_cq_rx_to_pkt_flags(&cqd, rxmb); -+ } else { -+ rxmb->pkt_len = 0; -+ rxmb->packet_type = 0; -+ rxmb->ol_flags = 0; -+ } -+ rxmb->data_len = rxmb->pkt_len; -+ -+ /* prefetch mbuf data for caller */ -+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, -+ RTE_PKTMBUF_HEADROOM)); -+ -+ /* store the mbuf address into the next entry of the array */ -+ rx_pkts[nb_rx++] = rxmb; -+ } -+ -+ nb_hold += nb_rx; -+ cq->to_clean = rx_id; -+ -+ if (nb_hold > rq->rx_free_thresh) { -+ rq->posted_index = enic_ring_add(rq->ring.desc_count, -+ rq->posted_index, nb_hold); -+ nb_hold = 0; -+ rte_mb(); -+ iowrite32(rq->posted_index, &rq->ctrl->posted_index); -+ } -+ -+ rq->rx_nb_hold = nb_hold; -+ -+ return nb_rx; -+} -+ -+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) -+{ -+ struct vnic_wq_buf *buf; -+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS]; -+ unsigned int nb_to_free, nb_free = 0, i; -+ struct rte_mempool *pool; -+ unsigned int tail_idx; -+ unsigned int desc_count = wq->ring.desc_count; -+ -+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) -+ + 1; -+ tail_idx = wq->tail_idx; -+ buf = &wq->bufs[tail_idx]; -+ pool = ((struct rte_mbuf *)buf->mb)->pool; -+ for (i = 0; i < nb_to_free; i++) { -+ buf = &wq->bufs[tail_idx]; -+ m = (struct rte_mbuf *)(buf->mb); -+ if (likely(m->pool == pool)) { -+ ASSERT(nb_free < ENIC_MAX_WQ_DESCS); -+ free[nb_free++] = m; -+ } else { -+ rte_mempool_put_bulk(pool, (void *)free, nb_free); -+ free[0] = m; -+ nb_free = 1; -+ pool = m->pool; -+ } -+ tail_idx = enic_ring_incr(desc_count, tail_idx); -+ buf->mb = NULL; -+ } -+ -+ rte_mempool_put_bulk(pool, (void **)free, nb_free); -+ -+ wq->tail_idx = tail_idx; -+ wq->ring.desc_avail += nb_to_free; -+} -+ -+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) -+{ -+ u16 completed_index; -+ -+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff; -+ -+ if (wq->last_completed_index != completed_index) { -+ enic_free_wq_bufs(wq, completed_index); -+ wq->last_completed_index = completed_index; -+ } -+ return 0; -+} -+ -+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts) -+{ -+ uint16_t index; -+ unsigned int pkt_len, data_len; 
-+ unsigned int nb_segs; -+ struct rte_mbuf *tx_pkt; -+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue; -+ struct enic *enic = vnic_dev_priv(wq->vdev); -+ unsigned short vlan_id; -+ unsigned short ol_flags; -+ unsigned int wq_desc_avail; -+ int head_idx; -+ struct vnic_wq_buf *buf; -+ unsigned int hw_ip_cksum_enabled; -+ unsigned int desc_count; -+ struct wq_enet_desc *descs, *desc_p, desc_tmp; -+ uint16_t mss; -+ uint8_t vlan_tag_insert; -+ uint8_t eop; -+ uint64_t bus_addr; -+ -+ enic_cleanup_wq(enic, wq); -+ wq_desc_avail = vnic_wq_desc_avail(wq); -+ head_idx = wq->head_idx; -+ desc_count = wq->ring.desc_count; -+ -+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX); -+ -+ hw_ip_cksum_enabled = enic->hw_ip_checksum; -+ for (index = 0; index < nb_pkts; index++) { -+ tx_pkt = *tx_pkts++; -+ nb_segs = tx_pkt->nb_segs; -+ if (nb_segs > wq_desc_avail) { -+ if (index > 0) -+ goto post; -+ goto done; -+ } -+ -+ pkt_len = tx_pkt->pkt_len; -+ data_len = tx_pkt->data_len; -+ vlan_id = tx_pkt->vlan_tci; -+ ol_flags = tx_pkt->ol_flags; -+ -+ mss = 0; -+ vlan_tag_insert = 0; -+ bus_addr = (dma_addr_t) -+ (tx_pkt->buf_physaddr + tx_pkt->data_off); -+ -+ descs = (struct wq_enet_desc *)wq->ring.descs; -+ desc_p = descs + head_idx; -+ -+ eop = (data_len == pkt_len); -+ -+ if (ol_flags & PKT_TX_VLAN_PKT) -+ vlan_tag_insert = 1; -+ -+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM)) -+ mss |= ENIC_CALC_IP_CKSUM; -+ -+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM)) -+ mss |= ENIC_CALC_TCP_UDP_CKSUM; -+ -+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop, -+ eop, 0, vlan_tag_insert, vlan_id, 0); -+ -+ *desc_p = desc_tmp; -+ buf = &wq->bufs[head_idx]; -+ buf->mb = (void *)tx_pkt; -+ head_idx = enic_ring_incr(desc_count, head_idx); -+ wq_desc_avail--; -+ -+ if (!eop) { -+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt = -+ tx_pkt->next) { -+ data_len = tx_pkt->data_len; -+ -+ if (tx_pkt->next == NULL) -+ eop = 1; -+ desc_p = descs + head_idx; -+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr -+ + tx_pkt->data_off); -+ wq_enet_desc_enc((struct wq_enet_desc *) -+ &desc_tmp, bus_addr, data_len, -+ mss, 0, 0, eop, eop, 0, -+ vlan_tag_insert, vlan_id, 0); -+ -+ *desc_p = desc_tmp; -+ buf = &wq->bufs[head_idx]; -+ buf->mb = (void *)tx_pkt; -+ head_idx = enic_ring_incr(desc_count, head_idx); -+ wq_desc_avail--; -+ } -+ } -+ } -+ post: -+ rte_wmb(); -+ iowrite32(head_idx, &wq->ctrl->posted_index); -+ done: -+ wq->ring.desc_avail = wq_desc_avail; -+ wq->head_idx = head_idx; -+ -+ return index; -+} --- -2.7.0 - -- cgit 1.2.3-korg From a10f62b11e7a710fde628ae75fe5791e54caba0a Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Fri, 25 Nov 2016 13:36:12 +0000 Subject: dpdk: add ipsec cryptodev support DPDK Cryptodev support and related IPsec ESP nodes using DPDK Cryptodev APIs. When DPDK Cryptodev support is enabled, the node graph is modified by adding and replacing some of the nodes. 
The following nodes are replaced: * esp-encrypt -> dpdk-esp-encrypt * esp-decrypt -> dpdk-esp-decrypt The following nodes are added: * dpdk-crypto-input : polling input node * dpdk-esp-encrypt-post : internal node * dpdk-esp-decrypt-post : internal node Change-Id: I6dca9a890abaf4fb2a4fffce3fd08ac013e4d701 Signed-off-by: Zhang, Roy Fan Signed-off-by: Sergio Gonzalez Monroy --- build-data/packages/dpdk.mk | 4 + build-data/packages/vnet.mk | 3 + build-data/packages/vpp.mk | 3 + build-data/platforms/vpp.mk | 1 + doxygen/user_doc.md | 1 + dpdk/Makefile | 4 + vnet/Makefile.am | 17 +- vnet/configure.ac | 8 + vnet/vnet/devices/dpdk/ipsec/cli.c | 141 ++++++ vnet/vnet/devices/dpdk/ipsec/crypto_node.c | 209 ++++++++ vnet/vnet/devices/dpdk/ipsec/dir.dox | 18 + .../devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md | 73 +++ vnet/vnet/devices/dpdk/ipsec/esp.h | 268 ++++++++++ vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c | 549 ++++++++++++++++++++ vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c | 554 +++++++++++++++++++++ vnet/vnet/devices/dpdk/ipsec/ipsec.c | 313 ++++++++++++ vnet/vnet/devices/dpdk/ipsec/ipsec.h | 216 ++++++++ vnet/vnet/ipsec-gre/interface.c | 9 +- vnet/vnet/ipsec/esp.h | 151 ++++++ vnet/vnet/ipsec/esp_decrypt.c | 121 ----- vnet/vnet/ipsec/esp_encrypt.c | 25 - vnet/vnet/ipsec/ipsec.c | 31 +- vnet/vnet/ipsec/ipsec.h | 5 + vnet/vnet/ipsec/ipsec_cli.c | 1 - vnet/vnet/ipsec/ipsec_if.c | 39 ++ vnet/vnet/ipsec/ipsec_if_in.c | 10 +- vnet/vnet/ipsec/ipsec_if_out.c | 7 +- vnet/vnet/ipsec/ipsec_input.c | 8 +- vnet/vnet/ipsec/ipsec_output.c | 8 +- vpp/Makefile.am | 5 +- vpp/configure.ac | 8 + 31 files changed, 2651 insertions(+), 159 deletions(-) create mode 100644 vnet/vnet/devices/dpdk/ipsec/cli.c create mode 100644 vnet/vnet/devices/dpdk/ipsec/crypto_node.c create mode 100644 vnet/vnet/devices/dpdk/ipsec/dir.dox create mode 100644 vnet/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md create mode 100644 vnet/vnet/devices/dpdk/ipsec/esp.h create mode 100644 vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c create mode 100644 vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c create mode 100644 vnet/vnet/devices/dpdk/ipsec/ipsec.c create mode 100644 vnet/vnet/devices/dpdk/ipsec/ipsec.h (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index f9163c87..a529e365 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -22,6 +22,10 @@ DPDK_MAKE_ARGS = -C $(call find_source_fn,$(PACKAGE_SOURCE)) \ DPDK_TUNE=$(DPDK_TUNE) \ DPDK_DEBUG=$(DPDK_DEBUG) +DPDK_CRYPTO_PMD=$(strip $($(PLATFORM)_uses_dpdk_cryptodev)) +ifneq ($(DPDK_CRYPTO_PMD),) +DPDK_MAKE_ARGS += DPDK_CRYPTO_PMD=y +endif DPDK_PLATFORM_TARGET=$(strip $($(PLATFORM)_dpdk_target)) ifneq ($(DPDK_PLATFORM_TARGET),) diff --git a/build-data/packages/vnet.mk b/build-data/packages/vnet.mk index cafb9393..399ca1b4 100644 --- a/build-data/packages/vnet.mk +++ b/build-data/packages/vnet.mk @@ -38,4 +38,7 @@ vnet_configure_depend += dpdk-install vnet_CPPFLAGS += $(call installed_includes_fn, dpdk) vnet_LDFLAGS += $(call installed_libs_fn, dpdk) endif +ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) +vnet_configure_args += --with-dpdk-crypto +endif endif diff --git a/build-data/packages/vpp.mk b/build-data/packages/vpp.mk index be10e17d..6831c6b8 100644 --- a/build-data/packages/vpp.mk +++ b/build-data/packages/vpp.mk @@ -45,4 +45,7 @@ vpp_configure_depend += dpdk-install vpp_CPPFLAGS += $(call installed_includes_fn, dpdk) vpp_LDFLAGS += $(call installed_libs_fn, dpdk) endif +ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) 
+vpp_configure_args += --with-dpdk-crypto +endif endif diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 4fe7cc8f..15d4dc39 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -41,6 +41,7 @@ vlib_configure_args_vpp = --with-pre-data=128 plugins_configure_args_vpp = --with-dpdk # DPDK configuration parameters +# vpp_uses_dpdk_cryptodev = yes # vpp_uses_external_dpdk = yes # vpp_dpdk_inc_dir = /usr/include/dpdk # vpp_dpdk_lib_dir = /usr/lib diff --git a/doxygen/user_doc.md b/doxygen/user_doc.md index 59063764..2e87c877 100644 --- a/doxygen/user_doc.md +++ b/doxygen/user_doc.md @@ -6,6 +6,7 @@ Several modules provide operational, dataplane-user focused documentation. - [GUI guided user demo](https://wiki.fd.io/view/VPP_Sandbox/vpp-userdemo) - @subpage qos_doc - @subpage ipsec_gre_doc +- @subpage dpdk_crypto_ipsec_doc - @subpage map_doc - @subpage lldp_doc - @subpage ioam_plugin_doc diff --git a/dpdk/Makefile b/dpdk/Makefile index 2f5037df..46cc3db1 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -21,6 +21,7 @@ DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_MARCH ?= native DPDK_TUNE ?= generic DPDK_DEBUG ?= n +DPDK_CRYPTO_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) @@ -118,6 +119,9 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_VMXNET3_DEBUG_INIT,$(DPDK_DEBUG)) $(call set,RTE_LIBRTE_PMD_BOND,y) $(call set,RTE_LIBRTE_IP_FRAG,y) + $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_PMD)) + $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_PMD)) + $(call set,RTE_LIBRTE_PMD_QAT,$(DPDK_CRYPTO_PMD)) @# not needed $(call set,RTE_LIBRTE_TIMER,n) $(call set,RTE_LIBRTE_CFGFILE,n) diff --git a/vnet/Makefile.am b/vnet/Makefile.am index 86b73906..d806785f 100644 --- a/vnet/Makefile.am +++ b/vnet/Makefile.am @@ -13,7 +13,7 @@ AUTOMAKE_OPTIONS = foreign subdir-objects -AM_CFLAGS = -Wall -Werror @DPDK@ @IPSEC@ @IPV6SR@ +AM_CFLAGS = -Wall -Werror @DPDK@ @DPDK_CRYPTO@ @IPSEC@ @IPV6SR@ libvnet_la_SOURCES = libvnetplugin_la_SOURCES = @@ -345,7 +345,15 @@ libvnet_la_SOURCES += \ vnet/ipsec/ikev2_cli.c \ vnet/ipsec/ikev2_payload.c \ vnet/ipsec/ikev2_format.c -endif +if WITH_DPDK_CRYPTO +libvnet_la_SOURCES += \ + vnet/devices/dpdk/ipsec/esp_encrypt.c \ + vnet/devices/dpdk/ipsec/esp_decrypt.c \ + vnet/devices/dpdk/ipsec/crypto_node.c \ + vnet/devices/dpdk/ipsec/cli.c \ + vnet/devices/dpdk/ipsec/ipsec.c +endif +endif libvnet_la_SOURCES += \ vnet/ipsec/ipsec_output.c @@ -355,6 +363,11 @@ nobase_include_HEADERS += \ vnet/ipsec/esp.h \ vnet/ipsec/ikev2.h \ vnet/ipsec/ikev2_priv.h +if WITH_DPDK_CRYPTO +nobase_include_HEADERS += \ + vnet/devices/dpdk/ipsec/ipsec.h \ + vnet/devices/dpdk/ipsec/esp.h +endif ######################################## # Layer 3 protocol: osi diff --git a/vnet/configure.ac b/vnet/configure.ac index 80de43af..6a5281b0 100644 --- a/vnet/configure.ac +++ b/vnet/configure.ac @@ -12,6 +12,11 @@ AC_ARG_WITH(dpdk, [with_dpdk=1], [with_dpdk=0]) +AC_ARG_WITH(dpdk_crypto, + AC_HELP_STRING([--with-dpdk-crypto],[Use DPDK cryptodev]), + [with_dpdk_crypto=1], + [with_dpdk_crypto=0]) + AC_ARG_WITH(ipsec, AC_HELP_STRING([--without-ipsec],[Disable ipsec]), [with_ipsec=0], @@ -27,6 +32,9 @@ AC_ARG_ENABLE(tests, [enable_tests=1], [enable_tests=0]) +AM_CONDITIONAL(WITH_DPDK_CRYPTO, test "$with_dpdk_crypto" = "1") +AC_SUBST(DPDK_CRYPTO,[-DDPDK_CRYPTO=${with_dpdk_crypto}]) + AM_CONDITIONAL(WITH_DPDK, test "$with_dpdk" = "1") AC_SUBST(DPDK,[-DDPDK=${with_dpdk}]) diff --git a/vnet/vnet/devices/dpdk/ipsec/cli.c 
b/vnet/vnet/devices/dpdk/ipsec/cli.c new file mode 100644 index 00000000..3b634e03 --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/cli.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +static void +dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + u32 i, skip_master; + + if (detail_display) + vlib_cli_output (vm, "worker\t%10s\t%15s\tdir\tdev\tqp\n", + "cipher", "auth"); + else + vlib_cli_output (vm, "worker\tcrypto device id(type)\n"); + + skip_master = vlib_num_workers () > 0; + + for (i = 0; i < tm->n_vlib_mains; i++) + { + uword key, data; + u32 cpu_index = vlib_mains[i]->cpu_index; + crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + u8 *s = 0; + + if (skip_master) + { + skip_master = 0; + continue; + } + + if (!detail_display) + { + i32 last_cdev = -1; + crypto_qp_data_t *qpd; + + s = format (s, "%u\t", cpu_index); + + /* *INDENT-OFF* */ + vec_foreach (qpd, cwm->qp_data) + { + u32 dev_id = qpd->dev_id; + + if ((u16) last_cdev != dev_id) + { + struct rte_cryptodev_info cdev_info; + + rte_cryptodev_info_get (dev_id, &cdev_info); + + s = format(s, "%u(%s)\t", dev_id, cdev_info.feature_flags & + RTE_CRYPTODEV_FF_HW_ACCELERATED ? "HW" : "SW"); + } + last_cdev = dev_id; + } + /* *INDENT-ON* */ + vlib_cli_output (vm, "%s", s); + } + else + { + char cipher_str[15], auth_str[15]; + struct rte_cryptodev_capabilities cap; + crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; + /* *INDENT-OFF* */ + hash_foreach (key, data, cwm->algo_qp_map, + ({ + cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cap.sym.cipher.algo = p_key->cipher_algo; + check_algo_is_supported (&cap, cipher_str); + cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH; + cap.sym.auth.algo = p_key->auth_algo; + check_algo_is_supported (&cap, auth_str); + vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n", + vlib_mains[i]->cpu_index, cipher_str, auth_str, + p_key->is_outbound ? 
"out" : "in", + cwm->qp_data[data].dev_id, + cwm->qp_data[data].qp_id); + })); + /* *INDENT-ON* */ + } + } +} + +static clib_error_t * +lcore_cryptodev_map_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + u16 detail = 0; + + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "verbose")) + detail = 1; + else + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + + unformat_free (line_input); + + dpdk_ipsec_show_mapping (vm, detail); + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (lcore_cryptodev_map, static) = { + .path = "show crypto device mapping", + .short_help = + "show cryptodev device mapping ", + .function = lcore_cryptodev_map_fn, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/devices/dpdk/ipsec/crypto_node.c b/vnet/vnet/devices/dpdk/ipsec/crypto_node.c new file mode 100644 index 00000000..37d5e5fa --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/crypto_node.c @@ -0,0 +1,209 @@ +/* + *------------------------------------------------------------------ + * crypto_node.c - DPDK Cryptodev input node + * + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include + +#include + +#define foreach_dpdk_crypto_input_next \ + _(DROP, "error-drop") \ + _(ENCRYPT_POST, "dpdk-esp-encrypt-post") \ + _(DECRYPT_POST, "dpdk-esp-decrypt-post") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f, + foreach_dpdk_crypto_input_next +#undef _ + DPDK_CRYPTO_INPUT_N_NEXT, +} dpdk_crypto_input_next_t; + +#define foreach_dpdk_crypto_input_error \ + _(DQ_COPS, "Crypto ops dequeued") \ + _(COP_FAILED, "Crypto op failed") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_ERROR_##f, + foreach_dpdk_crypto_input_error +#undef _ + DPDK_CRYPTO_INPUT_N_ERROR, +} dpdk_crypto_input_error_t; + +static char *dpdk_crypto_input_error_strings[] = { +#define _(n, s) s, + foreach_dpdk_crypto_input_error +#undef _ +}; + +vlib_node_registration_t dpdk_crypto_input_node; + +typedef struct +{ + u32 cdev; + u32 qp; + u32 status; + u32 sa_idx; + u32 next_index; +} dpdk_crypto_input_trace_t; + +static u8 * +format_dpdk_crypto_input_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + dpdk_crypto_input_trace_t *t = va_arg (*args, dpdk_crypto_input_trace_t *); + + s = format (s, "dpdk_crypto: cryptodev-id %u queue-pair %u next-index %d", + t->cdev, t->qp, t->next_index); + + s = format (s, "status %u sa-idx %u\n", t->status, t->sa_idx); + + return s; +} + +static_always_inline u32 +dpdk_crypto_dequeue (vlib_main_t * vm, vlib_node_runtime_t * node, + crypto_qp_data_t * qpd) +{ + u32 n_deq, *to_next = 0, next_index, n_cops, def_next_index; + struct rte_crypto_op **cops = qpd->cops; + + if (qpd->inflights == 0) + return 0; + + if (qpd->is_outbound) + def_next_index = DPDK_CRYPTO_INPUT_NEXT_ENCRYPT_POST; + else + def_next_index = DPDK_CRYPTO_INPUT_NEXT_DECRYPT_POST; + + n_cops = rte_cryptodev_dequeue_burst (qpd->dev_id, qpd->qp_id, + cops, VLIB_FRAME_SIZE); + n_deq = n_cops; + next_index = def_next_index; + + qpd->inflights -= n_cops; + ASSERT (qpd->inflights >= 0); + + while (n_cops > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_cops > 0 && n_left_to_next > 0) + { + u32 bi0, next0; + vlib_buffer_t *b0 = 0; + struct rte_crypto_op *cop; + struct rte_crypto_sym_op *sym_cop; + + cop = cops[0]; + cops += 1; + n_cops -= 1; + n_left_to_next -= 1; + + next0 = def_next_index; + + if (PREDICT_FALSE (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) + { + next0 = DPDK_CRYPTO_INPUT_NEXT_DROP; + vlib_node_increment_counter (vm, dpdk_crypto_input_node.index, + DPDK_CRYPTO_INPUT_ERROR_COP_FAILED, + 1); + } + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + sym_cop = (struct rte_crypto_sym_op *) (cop + 1); + b0 = vlib_buffer_from_rte_mbuf (sym_cop->m_src); + bi0 = vlib_get_buffer_index (vm, b0); + + to_next[0] = bi0; + to_next += 1; + + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + dpdk_crypto_input_trace_t *tr; + tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->cdev = qpd->dev_id; + tr->qp = qpd->qp_id; + tr->status = cop->status; + tr->next_index = next0; + tr->sa_idx = vnet_buffer (b0)->ipsec.sad_index; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + crypto_free_cop (qpd, qpd->cops, n_deq); + + vlib_node_increment_counter (vm, 
dpdk_crypto_input_node.index, + DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, n_deq); + return n_deq; +} + +static uword +dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 cpu_index = os_get_cpu_number (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + crypto_qp_data_t *qpd; + u32 n_deq = 0; + + /* *INDENT-OFF* */ + vec_foreach (qpd, cwm->qp_data) + n_deq += dpdk_crypto_dequeue(vm, node, qpd); + /* *INDENT-ON* */ + + return n_deq; +} + +VLIB_REGISTER_NODE (dpdk_crypto_input_node) = +{ + .function = dpdk_crypto_input_fn,.name = "dpdk-crypto-input",.format_trace = + format_dpdk_crypto_input_trace,.type = VLIB_NODE_TYPE_INPUT,.state = + VLIB_NODE_STATE_DISABLED,.n_errors = + DPDK_CRYPTO_INPUT_N_ERROR,.error_strings = + dpdk_crypto_input_error_strings,.n_next_nodes = + DPDK_CRYPTO_INPUT_N_NEXT,.next_nodes = + { +#define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n, + foreach_dpdk_crypto_input_next +#undef _ + } +,}; + +#if DPDK_CRYPTO==1 +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_crypto_input_node, dpdk_crypto_input_fn) +#endif +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/devices/dpdk/ipsec/dir.dox b/vnet/vnet/devices/dpdk/ipsec/dir.dox new file mode 100644 index 00000000..ffebfc4d --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/dir.dox @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + @dir vnet/vnet/devices/dpdk/ipsec + @brief IPSec ESP encrypt/decrypt using DPDK Cryptodev API +*/ diff --git a/vnet/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md b/vnet/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md new file mode 100644 index 00000000..8089696f --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md @@ -0,0 +1,73 @@ +# VPP IPSec implementation using DPDK Cryptodev API {#dpdk_crypto_ipsec_doc} + +This document is meant to contain all related information about implementation and usability. + + +## VPP IPsec with DPDK Cryptodev + +DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)). + +When DPDK Cryptodev support is enabled, the node graph is modified by adding and replacing some of the nodes. + +The following nodes are replaced: +* esp-encrypt -> dpdk-esp-encrypt +* esp-decrypt -> dpdk-esp-decrypt + +The following nodes are added: +* dpdk-crypto-input : polling input node, basically dequeuing from crypto devices. +* dpdk-esp-encrypt-post : internal node. +* dpdk-esp-decrypt-post : internal node. 
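
The dequeue path of the dpdk-crypto-input node is essentially a polling loop over each worker's cryptodev queue pairs. The sketch below is a condensed, illustrative reading of dpdk_crypto_dequeue() from crypto_node.c in this patch, not the actual node code: poll_crypto_qp(), handle_completed_buffer() and BURST_SIZE are placeholder names, and graph-node frame handling, tracing and error counters are omitted. Only the DPDK calls (rte_cryptodev_dequeue_burst, the rte_crypto_op status values) are real APIs used by the patch.

```
/*
 * Condensed sketch of the dpdk-crypto-input dequeue loop (illustrative only;
 * see dpdk_crypto_dequeue() in crypto_node.c for the real implementation).
 */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

#define BURST_SIZE 256          /* stands in for VLIB_FRAME_SIZE */

static void
handle_completed_buffer (struct rte_mbuf *m, int success)
{
  /* In the real node the buffer is enqueued to dpdk-esp-encrypt-post,
   * dpdk-esp-decrypt-post or error-drop; this stub just ignores it. */
  (void) m;
  (void) success;
}

static uint32_t
poll_crypto_qp (uint8_t dev_id, uint16_t qp_id)
{
  struct rte_crypto_op *cops[BURST_SIZE];
  uint16_t n, i;

  /* Poll the cryptodev queue pair for completed operations. */
  n = rte_cryptodev_dequeue_burst (dev_id, qp_id, cops, BURST_SIZE);

  for (i = 0; i < n; i++)
    {
      struct rte_crypto_op *cop = cops[i];
      /* The symmetric op descriptor is laid out immediately after the
       * generic op header; this mirrors how crypto_node.c recovers the
       * source mbuf for the processed packet. */
      struct rte_crypto_sym_op *sym = (struct rte_crypto_sym_op *) (cop + 1);

      handle_completed_buffer (sym->m_src,
                               cop->status == RTE_CRYPTO_OP_STATUS_SUCCESS);
      cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
    }

  return n;
}
```

In the actual node this loop runs once per worker per graph iteration, summing the dequeued count across all of the worker's queue pairs and returning it as the node's vector count.
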
+ + +### How to enable VPP IPSec with DPDK Cryptodev support + +To enable DPDK Cryptodev support (disabled by default), we need the following env option: + + vpp_uses_dpdk_cryptodev=yes + +A couple of ways to achive this: +* uncomment/add it in the platforms config (ie. build-data/platforms/vpp.mk) +* set the option when building vpp (ie. make vpp_uses_dpdk_cryptodev=yes build-release) + + +### Crypto Resources allocation + +VPP allocates crypto resources based on a best effort approach: +* first allocate Hardware crypto resources, then Software. +* if there are not enough crypto resources for all workers, all packets will be dropped if they reach ESP encrypt/decrypt nodes, displaying the warning: + + 0: dpdk_ipsec_init: not enough cryptodevs for ipsec + + +### Configuration example + +No especial IPsec configuration is required. + +Once DPDK Cryptodev is enabled, the user just needs to provide cryptodevs in the startup.conf. + +Example startup.conf: + +``` +dpdk { + socket-mem 1024,1024 + num-mbufs 131072 + dev 0000:81:00.0 + dev 0000:81:00.1 + dev 0000:85:01.0 + dev 0000:85:01.1 + vdev cryptodev_aesni_mb_pmd,socket_id=1 + vdev cryptodev_aesni_mb_pmd,socket_id=1 +} +``` + +In the above configuration: +* 0000:85:01.0 and 0000:85:01.1 are crypto BDFs and they require the same driver binding as DPDK Ethernet devices but they do not support any extra configuration options. +* Two AESNI-MB Software Cryptodev PMDs are created in NUMA node 1. + +For further details refer to [DPDK Crypto Device Driver documentation](http://dpdk.org/doc/guides/cryptodevs/index.html) + +### Operational data + +The following CLI command displays the Cryptodev/Worker mapping: + + show crypto device mapping [verbose] diff --git a/vnet/vnet/devices/dpdk/ipsec/esp.h b/vnet/vnet/devices/dpdk/ipsec/esp.h new file mode 100644 index 00000000..71282ac0 --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/esp.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef __DPDK_ESP_H__ +#define __DPDK_ESP_H__ + +#include +#include +#include + +typedef struct +{ + enum rte_crypto_cipher_algorithm algo; + u8 key_len; + u8 iv_len; +} dpdk_esp_crypto_alg_t; + +typedef struct +{ + enum rte_crypto_auth_algorithm algo; + u8 trunc_size; +} dpdk_esp_integ_alg_t; + +typedef struct +{ + dpdk_esp_crypto_alg_t *esp_crypto_algs; + dpdk_esp_integ_alg_t *esp_integ_algs; +} dpdk_esp_main_t; + +dpdk_esp_main_t dpdk_esp_main; + +static_always_inline void +dpdk_esp_init () +{ + dpdk_esp_main_t *em = &dpdk_esp_main; + dpdk_esp_integ_alg_t *i; + dpdk_esp_crypto_alg_t *c; + + vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1); + + c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128]; + c->algo = RTE_CRYPTO_CIPHER_AES_CBC; + c->key_len = 16; + c->iv_len = 16; + + c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192]; + c->algo = RTE_CRYPTO_CIPHER_AES_CBC; + c->key_len = 24; + c->iv_len = 16; + + c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256]; + c->algo = RTE_CRYPTO_CIPHER_AES_CBC; + c->key_len = 32; + c->iv_len = 16; + + vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1); + + i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96]; + i->algo = RTE_CRYPTO_AUTH_SHA1_HMAC; + i->trunc_size = 12; + + i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96]; + i->algo = RTE_CRYPTO_AUTH_SHA256_HMAC; + i->trunc_size = 12; + + i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128]; + i->algo = RTE_CRYPTO_AUTH_SHA256_HMAC; + i->trunc_size = 16; + + i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192]; + i->algo = RTE_CRYPTO_AUTH_SHA384_HMAC; + i->trunc_size = 24; + + i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256]; + i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC; + i->trunc_size = 32; +} + +static_always_inline int +add_del_sa_sess (u32 sa_index, u8 is_add) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + u8 skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + crypto_sa_session_t *sa_sess; + u8 is_outbound; + + if (skip_master) + { + skip_master = 0; + continue; + } + + for (is_outbound = 0; is_outbound < 2; is_outbound++) + { + if (is_add) + { + pool_get (cwm->sa_sess_d[is_outbound], sa_sess); + } + else + { + u8 dev_id; + + sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index); + dev_id = cwm->qp_data[sa_sess->qp_index].dev_id; + + if (!sa_sess->sess) + continue; + + if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess)) + { + clib_warning("failed to free session"); + return -1; + } + memset(sa_sess, 0, sizeof(sa_sess[0])); + } + } + } + /* *INDENT-OFF* */ + + return 0; +} + +static_always_inline int +translate_crypto_algo(ipsec_crypto_alg_t crypto_algo, + struct rte_crypto_sym_xform *cipher_xform) +{ + switch (crypto_algo) + { + case IPSEC_CRYPTO_ALG_NONE: + cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL; + break; + case IPSEC_CRYPTO_ALG_AES_CBC_128: + case IPSEC_CRYPTO_ALG_AES_CBC_192: + case IPSEC_CRYPTO_ALG_AES_CBC_256: + cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; + break; + default: + return -1; + } + + cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + + return 0; +} + +static_always_inline int +translate_integ_algo(ipsec_integ_alg_t integ_alg, + struct rte_crypto_sym_xform *auth_xform) +{ + switch (integ_alg) { + case IPSEC_INTEG_ALG_NONE: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_NULL; + auth_xform->auth.digest_length = 0; + break; + case IPSEC_INTEG_ALG_SHA1_96: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC; + 
auth_xform->auth.digest_length = 12; + break; + case IPSEC_INTEG_ALG_SHA_256_96: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC; + auth_xform->auth.digest_length = 12; + break; + case IPSEC_INTEG_ALG_SHA_256_128: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC; + auth_xform->auth.digest_length = 16; + break; + case IPSEC_INTEG_ALG_SHA_384_192: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA384_HMAC; + auth_xform->auth.digest_length = 24; + break; + case IPSEC_INTEG_ALG_SHA_512_256: + auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC; + auth_xform->auth.digest_length = 32; + break; + default: + return -1; + } + + auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; + + return 0; +} + +static_always_inline int +create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound) +{ + u32 cpu_index = os_get_cpu_number(); + dpdk_crypto_main_t * dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + struct rte_crypto_sym_xform cipher_xform = {0}; + struct rte_crypto_sym_xform auth_xform = {0}; + struct rte_crypto_sym_xform *xfs; + uword key = 0, *data; + crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *)&key; + + cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cipher_xform.cipher.key.data = sa->crypto_key; + cipher_xform.cipher.key.length = sa->crypto_key_len; + + auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; + auth_xform.auth.key.data = sa->integ_key; + auth_xform.auth.key.length = sa->integ_key_len; + + if (translate_crypto_algo(sa->crypto_alg, &cipher_xform) < 0) + return -1; + p_key->cipher_algo = cipher_xform.cipher.algo; + + if (translate_integ_algo(sa->integ_alg, &auth_xform) < 0) + return -1; + p_key->auth_algo = auth_xform.auth.algo; + + if (is_outbound) + { + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; + cipher_xform.next = &auth_xform; + xfs = &cipher_xform; + } + else + { + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; + auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; + auth_xform.next = &cipher_xform; + xfs = &auth_xform; + } + + p_key->is_outbound = is_outbound; + + data = hash_get(cwm->algo_qp_map, key); + if (!data) + return -1; + + sa_sess->sess = + rte_cryptodev_sym_session_create(cwm->qp_data[*data].dev_id, xfs); + + if (!sa_sess->sess) + return -1; + + sa_sess->qp_index = (u8)*data; + + return 0; +} + +#endif /* __DPDK_ESP_H__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c b/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c new file mode 100644 index 00000000..c898d05e --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/esp_decrypt.c @@ -0,0 +1,549 @@ +/* + * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev + * + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include + +#define foreach_esp_decrypt_next \ +_(DROP, "error-drop") \ +_(IP4_INPUT, "ip4-input") \ +_(IP6_INPUT, "ip6-input") + +#define _(v, s) ESP_DECRYPT_NEXT_##v, +typedef enum { + foreach_esp_decrypt_next +#undef _ + ESP_DECRYPT_N_NEXT, +} esp_decrypt_next_t; + +#define foreach_esp_decrypt_error \ + _(RX_PKTS, "ESP pkts received") \ + _(DECRYPTION_FAILED, "ESP decryption failed") \ + _(REPLAY, "SA replayed packet") \ + _(NOT_IP, "Not IP packet (dropped)") \ + _(ENQ_FAIL, "Enqueue failed (buffer full)") \ + _(NO_CRYPTODEV, "Cryptodev not configured") \ + _(BAD_LEN, "Invalid ciphertext length") \ + _(UNSUPPORTED, "Cipher/Auth not supported") + + +typedef enum { +#define _(sym,str) ESP_DECRYPT_ERROR_##sym, + foreach_esp_decrypt_error +#undef _ + ESP_DECRYPT_N_ERROR, +} esp_decrypt_error_t; + +static char * esp_decrypt_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_error +#undef _ +}; + +vlib_node_registration_t dpdk_esp_decrypt_node; + +typedef struct { + ipsec_crypto_alg_t crypto_alg; + ipsec_integ_alg_t integ_alg; +} esp_decrypt_trace_t; + +/* packet trace format function */ +static u8 * format_esp_decrypt_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_decrypt_trace_t * t = va_arg (*args, esp_decrypt_trace_t *); + + s = format (s, "esp: crypto %U integrity %U", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + return s; +} + +static uword +dpdk_esp_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, *from, *to_next, next_index; + ipsec_main_t *im = &ipsec_main; + u32 cpu_index = os_get_cpu_number(); + dpdk_crypto_main_t * dcm = &dpdk_crypto_main; + dpdk_esp_main_t * em = &dpdk_esp_main; + u32 i; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + if (PREDICT_FALSE(!dcm->workers_main)) + { + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_NO_CRYPTODEV, n_left_from); + vlib_buffer_free(vm, from, n_left_from); + return n_left_from; + } + + crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index); + u32 n_qps = vec_len(cwm->qp_data); + struct rte_crypto_op ** cops_to_enq[n_qps]; + u32 n_cop_qp[n_qps], * bi_to_enq[n_qps]; + + for (i = 0; i < n_qps; i++) + { + bi_to_enq[i] = cwm->qp_data[i].bi; + cops_to_enq[i] = cwm->qp_data[i].cops; + } + + memset(n_cop_qp, 0, n_qps * sizeof(u32)); + + crypto_alloc_cops(); + + next_index = ESP_DECRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, sa_index0 = ~0, seq, icv_size; + vlib_buffer_t * b0; + esp_header_t * esp0; + ipsec_sa_t * sa0; + struct rte_mbuf * mb0 = 0; + const int BLOCK_SIZE = 16; + const int IV_SIZE = 16; + crypto_sa_session_t * sa_sess; + void * sess; + u16 qp_index; + struct rte_crypto_op * cop = 0; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, bi0); + esp0 = vlib_buffer_get_current (b0); + + sa_index0 = vnet_buffer(b0)->ipsec.sad_index; + sa0 = pool_elt_at_index (im->sad, sa_index0); + + seq = clib_host_to_net_u32(esp0->seq); + + /* anti-replay check */ + if (sa0->use_anti_replay) + { + int rv = 0; + + if (PREDICT_TRUE(sa0->use_esn)) + rv = 
esp_replay_check_esn(sa0, seq); + else + rv = esp_replay_check(sa0, seq); + + if (PREDICT_FALSE(rv)) + { + clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + } + + if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) || + PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE)) + { + clib_warning ("SPI %u : only cipher + auth supported", sa0->spi); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_UNSUPPORTED, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0); + + if (PREDICT_FALSE(!sa_sess->sess)) + { + int ret = create_sym_sess(sa0, sa_sess, 0); + ASSERT(ret == 0); + } + + sess = sa_sess->sess; + qp_index = sa_sess->qp_index; + + ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0); + cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops); + ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + cops_to_enq[qp_index][0] = cop; + cops_to_enq[qp_index] += 1; + n_cop_qp[qp_index] += 1; + bi_to_enq[qp_index][0] = bi0; + bi_to_enq[qp_index] += 1; + + rte_crypto_op_attach_sym_session(cop, sess); + + icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; + + /* Convert vlib buffer to mbuf */ + mb0 = rte_mbuf_from_vlib_buffer(b0); + mb0->data_len = b0->current_length; + mb0->pkt_len = b0->current_length; + mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; + + /* Outer IP header has already been stripped */ + u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) - + IV_SIZE - icv_size; + + if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0)) + { + clib_warning ("payload %u not multiple of %d\n", + payload_len, BLOCK_SIZE); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + vec_add (vec_elt (cwm->qp_data, qp_index).free_cops, &cop, 1); + bi_to_enq[qp_index] -= 1; + cops_to_enq[qp_index] -= 1; + n_cop_qp[qp_index] -= 1; + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1); + + sym_cop->m_src = mb0; + sym_cop->cipher.data.offset = sizeof (esp_header_t) + IV_SIZE; + sym_cop->cipher.data.length = payload_len; + + sym_cop->cipher.iv.data = + rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t)); + sym_cop->cipher.iv.phys_addr = + rte_pktmbuf_mtophys_offset(mb0, sizeof (esp_header_t)); + sym_cop->cipher.iv.length = IV_SIZE; + + if (sa0->use_esn) + { + dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1); + u8* payload_end = + rte_pktmbuf_mtod_offset(mb0, u8*, sizeof(esp_header_t) + IV_SIZE + + payload_len); + + memcpy (priv->icv, payload_end, icv_size); + *((u32*) payload_end) = sa0->seq_hi; + sym_cop->auth.data.offset = 0; + sym_cop->auth.data.length = + sizeof(esp_header_t) + IV_SIZE + payload_len + sizeof(sa0->seq_hi); + sym_cop->auth.digest.data = priv->icv; + sym_cop->auth.digest.phys_addr = + cop->phys_addr + (uintptr_t) priv->icv - (uintptr_t) cop; + sym_cop->auth.digest.length = icv_size; + } + else + { + sym_cop->auth.data.offset = 0; + sym_cop->auth.data.length = sizeof(esp_header_t) + + IV_SIZE + payload_len; + + sym_cop->auth.digest.data = + rte_pktmbuf_mtod_offset(mb0, void*, + rte_pktmbuf_pkt_len(mb0) - icv_size); + sym_cop->auth.digest.phys_addr = + 
rte_pktmbuf_mtophys_offset(mb0, + rte_pktmbuf_pkt_len(mb0) - icv_size); + sym_cop->auth.digest.length = icv_size; + } + +trace: + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + crypto_qp_data_t *qpd; + /* *INDENT-OFF* */ + vec_foreach_index (i, cwm->qp_data) + { + u32 enq; + + qpd = vec_elt_at_index(cwm->qp_data, i); + enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id, + qpd->cops, n_cop_qp[i]); + qpd->inflights += enq; + + if (PREDICT_FALSE(enq < n_cop_qp[i])) + { + crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq); + vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq); + + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, + n_cop_qp[i] - enq); + } + } + /* *INDENT-ON* */ + + return from_frame->n_vectors; +} + +VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = { + .function = dpdk_esp_decrypt_node_fn, + .name = "dpdk-esp-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn) + +/* + * Decrypt Post Node + */ + +#define foreach_esp_decrypt_post_error \ + _(PKTS, "ESP post pkts") + +typedef enum { +#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym, + foreach_esp_decrypt_post_error +#undef _ + ESP_DECRYPT_POST_N_ERROR, +} esp_decrypt_post_error_t; + +static char * esp_decrypt_post_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_post_error +#undef _ +}; + +vlib_node_registration_t dpdk_esp_decrypt_post_node; + +static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args) +{ + return s; +} + +static uword +dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, *from, *to_next = 0, next_index; + ipsec_sa_t * sa0; + u32 sa_index0 = ~0; + ipsec_main_t *im = &ipsec_main; + dpdk_esp_main_t *em = &dpdk_esp_main; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + esp_footer_t * f0; + const u32 IV_SIZE = 16; + u32 bi0, next0, icv_size; + vlib_buffer_t * b0 = 0; + ip4_header_t *ih4 = 0, *oh4 = 0; + ip6_header_t *ih6 = 0, *oh6 = 0; + u8 tunnel_mode = 1; + u8 transport_ip6 = 0; + + next0 = ESP_DECRYPT_NEXT_DROP; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + sa_index0 = vnet_buffer(b0)->ipsec.sad_index; + sa0 = pool_elt_at_index (im->sad, sa_index0); + + to_next[0] = bi0; + to_next += 1; + + icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; + + if (sa0->use_anti_replay) + { + esp_header_t * esp0 = vlib_buffer_get_current (b0); + u32 seq; + seq = clib_host_to_net_u32(esp0->seq); + if 
(PREDICT_TRUE(sa0->use_esn)) + esp_replay_advance_esn(sa0, seq); + else + esp_replay_advance(sa0, seq); + } + + ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t)); + vlib_buffer_advance (b0, sizeof (esp_header_t) + IV_SIZE); + + b0->current_length -= (icv_size + 2); + b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID; + f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) + + b0->current_length); + b0->current_length -= f0->pad_length; + + /* transport mode */ + if (PREDICT_FALSE(!sa0->is_tunnel && !sa0->is_tunnel_ip6)) + { + tunnel_mode = 0; + + if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) != 0x40)) + { + if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) == 0x60)) + transport_ip6 = 1; + else + { + clib_warning("next header: 0x%x", f0->next_header); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_NOT_IP, 1); + goto trace; + } + } + } + + if (PREDICT_TRUE (tunnel_mode)) + { + if (PREDICT_TRUE(f0->next_header == IP_PROTOCOL_IP_IN_IP)) + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + else if (f0->next_header == IP_PROTOCOL_IPV6) + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + else + { + clib_warning("next header: 0x%x", f0->next_header); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } + } + /* transport mode */ + else + { + if (PREDICT_FALSE(transport_ip6)) + { + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t)); + vlib_buffer_advance (b0, -sizeof(ip6_header_t)); + oh6 = vlib_buffer_get_current (b0); + memmove(oh6, ih6, sizeof(ip6_header_t)); + + oh6->protocol = f0->next_header; + oh6->payload_length = + clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0) - + sizeof (ip6_header_t)); + } + else + { + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + vlib_buffer_advance (b0, -sizeof(ip4_header_t)); + oh4 = vlib_buffer_get_current (b0); + + oh4->ip_version_and_header_length = 0x45; + oh4->tos = ih4->tos; + oh4->fragment_id = 0; + oh4->flags_and_fragment_offset = 0; + oh4->ttl = ih4->ttl; + oh4->protocol = f0->next_header; + oh4->src_address.as_u32 = ih4->src_address.as_u32; + oh4->dst_address.as_u32 = ih4->dst_address.as_u32; + oh4->length = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + oh4->checksum = ip4_header_checksum (oh4); + } + } + + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0; + +trace: + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + + return from_frame->n_vectors; +} + +VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = { + .function = dpdk_esp_decrypt_post_node_fn, + .name = "dpdk-esp-decrypt-post", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_post_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), + .error_strings = esp_decrypt_post_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_post_node, 
dpdk_esp_decrypt_post_node_fn) diff --git a/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c b/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c new file mode 100644 index 00000000..aef4b90d --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/esp_encrypt.c @@ -0,0 +1,554 @@ +/* + * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev + * + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include +#include + +#define foreach_esp_encrypt_next \ +_(DROP, "error-drop") \ +_(IP4_INPUT, "ip4-input") \ +_(IP6_INPUT, "ip6-input") \ +_(INTERFACE_OUTPUT, "interface-output") + +#define _(v, s) ESP_ENCRYPT_NEXT_##v, +typedef enum +{ + foreach_esp_encrypt_next +#undef _ + ESP_ENCRYPT_N_NEXT, +} esp_encrypt_next_t; + +#define foreach_esp_encrypt_error \ + _(RX_PKTS, "ESP pkts received") \ + _(SEQ_CYCLED, "sequence number cycled") \ + _(ENQ_FAIL, "Enqueue failed (buffer full)") \ + _(NO_CRYPTODEV, "Cryptodev not configured") \ + _(UNSUPPORTED, "Cipher/Auth not supported") + + +typedef enum +{ +#define _(sym,str) ESP_ENCRYPT_ERROR_##sym, + foreach_esp_encrypt_error +#undef _ + ESP_ENCRYPT_N_ERROR, +} esp_encrypt_error_t; + +static char *esp_encrypt_error_strings[] = { +#define _(sym,string) string, + foreach_esp_encrypt_error +#undef _ +}; + +vlib_node_registration_t dpdk_esp_encrypt_node; + +typedef struct +{ + u32 spi; + u32 seq; + ipsec_crypto_alg_t crypto_alg; + ipsec_integ_alg_t integ_alg; +} esp_encrypt_trace_t; + +/* packet trace format function */ +static u8 * +format_esp_encrypt_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *); + + s = format (s, "esp: spi %u seq %u crypto %U integrity %U", + t->spi, t->seq, + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + return s; +} + +static uword +dpdk_esp_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, *from, *to_next, next_index; + ipsec_main_t *im = &ipsec_main; + u32 cpu_index = os_get_cpu_number (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + dpdk_esp_main_t *em = &dpdk_esp_main; + u32 i; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + if (PREDICT_FALSE (!dcm->workers_main)) + { + /* Likely there are not enough cryptodevs, so drop frame */ + vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, + ESP_ENCRYPT_ERROR_NO_CRYPTODEV, + n_left_from); + vlib_buffer_free (vm, from, n_left_from); + return n_left_from; + } + + crypto_worker_main_t *cwm = vec_elt_at_index (dcm->workers_main, cpu_index); + u32 n_qps = vec_len (cwm->qp_data); + struct rte_crypto_op **cops_to_enq[n_qps]; + u32 n_cop_qp[n_qps], *bi_to_enq[n_qps]; + + for (i = 0; i < n_qps; i++) + { + bi_to_enq[i] = cwm->qp_data[i].bi; + cops_to_enq[i] = 
cwm->qp_data[i].cops; + } + + memset (n_cop_qp, 0, n_qps * sizeof (u32)); + + crypto_alloc_cops (); + + next_index = ESP_ENCRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, next0; + vlib_buffer_t *b0 = 0; + u32 sa_index0; + ipsec_sa_t *sa0; + ip4_and_esp_header_t *ih0, *oh0 = 0; + ip6_and_esp_header_t *ih6_0, *oh6_0 = 0; + struct rte_mbuf *mb0 = 0; + esp_footer_t *f0; + u8 is_ipv6; + u8 ip_hdr_size; + u8 next_hdr_type; + u8 transport_mode = 0; + const int BLOCK_SIZE = 16; + const int IV_SIZE = 16; + u16 orig_sz; + crypto_sa_session_t *sa_sess; + void *sess; + struct rte_crypto_op *cop = 0; + u16 qp_index; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, bi0); + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + sa0 = pool_elt_at_index (im->sad, sa_index0); + + if (PREDICT_FALSE (esp_seq_advance (sa0))) + { + clib_warning ("sequence number counter has cycled SPI %u", + sa0->spi); + vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + //TODO: rekey SA + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0); + if (PREDICT_FALSE (!sa_sess->sess)) + { + int ret = create_sym_sess (sa0, sa_sess, 1); + ASSERT (ret == 0); + } + + qp_index = sa_sess->qp_index; + sess = sa_sess->sess; + + ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0); + cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops); + ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + cops_to_enq[qp_index][0] = cop; + cops_to_enq[qp_index] += 1; + n_cop_qp[qp_index] += 1; + bi_to_enq[qp_index][0] = bi0; + bi_to_enq[qp_index] += 1; + + ssize_t adv; + ih0 = vlib_buffer_get_current (b0); + orig_sz = b0->current_length; + is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60; + /* is ipv6 */ + if (PREDICT_TRUE (sa0->is_tunnel)) + { + if (PREDICT_TRUE (!is_ipv6)) + adv = -sizeof (ip4_and_esp_header_t); + else + adv = -sizeof (ip6_and_esp_header_t); + } + else + { + adv = -sizeof (esp_header_t); + if (PREDICT_TRUE (!is_ipv6)) + orig_sz -= sizeof (ip4_header_t); + else + orig_sz -= sizeof (ip6_header_t); + } + + /*transport mode save the eth header before it is overwritten */ + if (PREDICT_FALSE (!sa0->is_tunnel)) + { + ethernet_header_t *ieh0 = (ethernet_header_t *) + ((u8 *) vlib_buffer_get_current (b0) - + sizeof (ethernet_header_t)); + ethernet_header_t *oeh0 = + (ethernet_header_t *) ((u8 *) ieh0 + (adv - IV_SIZE)); + clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t)); + } + + vlib_buffer_advance (b0, adv - IV_SIZE); + + /* XXX IP6/ip4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */ + + /* is ipv6 */ + if (PREDICT_FALSE (is_ipv6)) + { + ih6_0 = (ip6_and_esp_header_t *) ih0; + ip_hdr_size = sizeof (ip6_header_t); + oh6_0 = vlib_buffer_get_current (b0); + + if (PREDICT_TRUE (sa0->is_tunnel)) + { + next_hdr_type = IP_PROTOCOL_IPV6; + oh6_0->ip6.ip_version_traffic_class_and_flow_label = + ih6_0->ip6.ip_version_traffic_class_and_flow_label; + } + else + { + next_hdr_type = ih6_0->ip6.protocol; + memmove (oh6_0, ih6_0, sizeof (ip6_header_t)); + } + + oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; + oh6_0->ip6.hop_limit = 254; + oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi); + oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq); + } + else + { + ip_hdr_size = sizeof (ip4_header_t); 
+ next_hdr_type = IP_PROTOCOL_IP_IN_IP; + oh0 = vlib_buffer_get_current (b0); + + oh0->ip4.ip_version_and_header_length = 0x45; + oh0->ip4.tos = ih0->ip4.tos; + oh0->ip4.fragment_id = 0; + oh0->ip4.flags_and_fragment_offset = 0; + oh0->ip4.ttl = 254; + oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + oh0->esp.spi = clib_net_to_host_u32 (sa0->spi); + oh0->esp.seq = clib_net_to_host_u32 (sa0->seq); + } + + if (PREDICT_TRUE (sa0->is_tunnel && !sa0->is_tunnel_ip6)) + { + oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32; + oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32; + + /* in tunnel mode send it back to FIB */ + next0 = ESP_ENCRYPT_NEXT_IP4_INPUT; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + } + else if (sa0->is_tunnel && sa0->is_tunnel_ip6) + { + oh6_0->ip6.src_address.as_u64[0] = + sa0->tunnel_src_addr.ip6.as_u64[0]; + oh6_0->ip6.src_address.as_u64[1] = + sa0->tunnel_src_addr.ip6.as_u64[1]; + oh6_0->ip6.dst_address.as_u64[0] = + sa0->tunnel_dst_addr.ip6.as_u64[0]; + oh6_0->ip6.dst_address.as_u64[1] = + sa0->tunnel_dst_addr.ip6.as_u64[1]; + + /* in tunnel mode send it back to FIB */ + next0 = ESP_ENCRYPT_NEXT_IP6_INPUT; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + } + else + { + next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; + transport_mode = 1; + /*ipv6 already handled */ + if (PREDICT_TRUE (!is_ipv6)) + { + next_hdr_type = ih0->ip4.protocol; + oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32; + oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32; + } + } + + ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG); + ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE); + + int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE; + + /* pad packet in input buffer */ + u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz; + u8 i; + u8 *padding = vlib_buffer_get_current (b0) + b0->current_length; + + for (i = 0; i < pad_bytes; ++i) + padding[i] = i + 1; + + f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes; + f0->pad_length = pad_bytes; + f0->next_header = next_hdr_type; + b0->current_length += pad_bytes + 2 + + em->esp_integ_algs[sa0->integ_alg].trunc_size; + + vnet_buffer (b0)->sw_if_index[VLIB_RX] = + vnet_buffer (b0)->sw_if_index[VLIB_RX]; + b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + + struct rte_crypto_sym_op *sym_cop; + sym_cop = (struct rte_crypto_sym_op *) (cop + 1); + + dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1); + + vnet_buffer (b0)->unused[0] = next0; + priv->iv[0] = sa0->seq; + priv->iv[1] = sa0->seq_hi; + + mb0 = rte_mbuf_from_vlib_buffer (b0); + mb0->data_len = b0->current_length; + mb0->pkt_len = b0->current_length; + mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; + + rte_crypto_op_attach_sym_session (cop, sess); + + sym_cop->m_src = mb0; + sym_cop->cipher.data.offset = ip_hdr_size + sizeof (esp_header_t); + sym_cop->cipher.data.length = BLOCK_SIZE * blocks + IV_SIZE; + + sym_cop->cipher.iv.data = (u8 *) priv->iv; + sym_cop->cipher.iv.phys_addr = cop->phys_addr + + (uintptr_t) priv->iv - (uintptr_t) cop; + sym_cop->cipher.iv.length = IV_SIZE; + + ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG); + ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE); + + sym_cop->auth.data.offset = ip_hdr_size; + sym_cop->auth.data.length = b0->current_length - ip_hdr_size - + em->esp_integ_algs[sa0->integ_alg].trunc_size; + + sym_cop->auth.digest.data = vlib_buffer_get_current (b0) + + b0->current_length - + em->esp_integ_algs[sa0->integ_alg].trunc_size; + sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0, + 
b0->current_length + - + em->esp_integ_algs + [sa0->integ_alg].trunc_size); + sym_cop->auth.digest.length = + em->esp_integ_algs[sa0->integ_alg].trunc_size; + + if (PREDICT_FALSE (sa0->use_esn)) + { + u8 *payload_end = + vlib_buffer_get_current (b0) + b0->current_length; + *((u32 *) payload_end) = sa0->seq_hi; + sym_cop->auth.data.length += sizeof (sa0->seq_hi); + } + + if (PREDICT_FALSE (is_ipv6)) + { + oh6_0->ip6.payload_length = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - + sizeof (ip6_header_t)); + } + else + { + oh0->ip4.length = + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)); + oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); + } + + if (transport_mode) + vlib_buffer_advance (b0, -sizeof (ethernet_header_t)); + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_encrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->spi = sa0->spi; + tr->seq = sa0->seq - 1; + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + crypto_qp_data_t *qpd; + /* *INDENT-OFF* */ + vec_foreach_index (i, cwm->qp_data) + { + u32 enq; + + qpd = vec_elt_at_index(cwm->qp_data, i); + enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id, + qpd->cops, n_cop_qp[i]); + qpd->inflights += enq; + + if (PREDICT_FALSE(enq < n_cop_qp[i])) + { + crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq); + vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq); + + vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, + n_cop_qp[i] - enq); + } + } + /* *INDENT-ON* */ + + return from_frame->n_vectors; +} + +VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = +{ + .function = dpdk_esp_encrypt_node_fn,.name = "dpdk-esp-encrypt",.flags = + VLIB_NODE_FLAG_IS_OUTPUT,.vector_size = sizeof (u32),.format_trace = + format_esp_encrypt_trace,.n_errors = + ARRAY_LEN (esp_encrypt_error_strings),.error_strings = + esp_encrypt_error_strings,.n_next_nodes = 1,.next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop",} +}; + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn) +/* + * ESP Encrypt Post Node + */ +#define foreach_esp_encrypt_post_error \ + _(PKTS, "ESP post pkts") + typedef enum + { +#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym, + foreach_esp_encrypt_post_error +#undef _ + ESP_ENCRYPT_POST_N_ERROR, + } esp_encrypt_post_error_t; + + static char *esp_encrypt_post_error_strings[] = { +#define _(sym,string) string, + foreach_esp_encrypt_post_error +#undef _ + }; + +vlib_node_registration_t dpdk_esp_encrypt_post_node; + +static u8 * +format_esp_encrypt_post_trace (u8 * s, va_list * args) +{ + return s; +} + +static uword +dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, *from, *to_next = 0, next_index; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, next0; + vlib_buffer_t *b0 = 0; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + to_next[0] = bi0; + to_next += 1; + + 
next0 = vnet_buffer (b0)->unused[0]; + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, bi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index, + ESP_ENCRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + + return from_frame->n_vectors; +} + +VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = +{ + .function = dpdk_esp_encrypt_post_node_fn,.name = + "dpdk-esp-encrypt-post",.vector_size = sizeof (u32),.format_trace = + format_esp_encrypt_post_trace,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors = + ARRAY_LEN (esp_encrypt_post_error_strings),.error_strings = + esp_encrypt_post_error_strings,.n_next_nodes = + ESP_ENCRYPT_N_NEXT,.next_nodes = + { +#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n, + foreach_esp_encrypt_next +#undef _ + } +}; + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node, + dpdk_esp_encrypt_post_node_fn) +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/devices/dpdk/ipsec/ipsec.c b/vnet/vnet/devices/dpdk/ipsec/ipsec.c new file mode 100644 index 00000000..de253f02 --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/ipsec.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#define DPDK_CRYPTO_NB_OBJS 2048 +#define DPDK_CRYPTO_CACHE_SIZE 512 +#define DPDK_CRYPTO_PRIV_SIZE 128 +#define DPDK_CRYPTO_N_QUEUE_DESC 512 +#define DPDK_CRYPTO_NB_COPS (1024 * 4) + +/* + * return: + * -1: update failed + * 0: already exist + * 1: mapped + */ +static int +update_qp_data (crypto_worker_main_t * cwm, + u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx) +{ + crypto_qp_data_t *qpd; + + /* *INDENT-OFF* */ + vec_foreach_index (*idx, cwm->qp_data) + { + qpd = vec_elt_at_index(cwm->qp_data, *idx); + + if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id && + qpd->is_outbound == is_outbound) + return 0; + } + /* *INDENT-ON* */ + + vec_add2 (cwm->qp_data, qpd, 1); + + qpd->dev_id = cdev_id; + qpd->qp_id = qp_id; + qpd->is_outbound = is_outbound; + + return 1; +} + +/* + * return: + * -1: error + * 0: already exist + * 1: mapped + */ +static int +add_mapping (crypto_worker_main_t * cwm, + u8 cdev_id, u16 qp, u8 is_outbound, + const struct rte_cryptodev_capabilities *cipher_cap, + const struct rte_cryptodev_capabilities *auth_cap) +{ + int mapped; + u16 qp_index; + uword key = 0, data, *ret; + crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; + + p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo; + p_key->auth_algo = (u8) auth_cap->sym.auth.algo; + p_key->is_outbound = is_outbound; + + ret = hash_get (cwm->algo_qp_map, key); + if (ret) + return 0; + + mapped = update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index); + if (mapped < 0) + return -1; + + data = (uword) qp_index; + + ret = hash_set (cwm->algo_qp_map, key, data); + if (!ret) + rte_panic ("Failed to insert hash table\n"); + + return mapped; +} + +/* + * return: + * 0: already exist + * 1: mapped + */ +static int +add_cdev_mapping (crypto_worker_main_t * cwm, + struct rte_cryptodev_info *dev_info, u8 cdev_id, + u16 qp, u8 is_outbound) +{ + const struct rte_cryptodev_capabilities *i, *j; + u32 mapped = 0; + + for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) + { + if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) + continue; + + if (check_algo_is_supported (i, NULL) != 0) + continue; + + for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; + j++) + { + int status = 0; + + if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) + continue; + + if (check_algo_is_supported (j, NULL) != 0) + continue; + + status = add_mapping (cwm, cdev_id, qp, is_outbound, i, j); + if (status == 1) + mapped += 1; + if (status < 0) + return status; + } + } + + return mapped; +} + +static int +check_cryptodev_queues () +{ + u32 n_qs = 0; + u8 cdev_id; + u32 n_req_qs = 2; + + if (vlib_num_workers () > 0) + n_req_qs = vlib_num_workers () * 2; + + for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++) + { + struct rte_cryptodev_info cdev_info; + + rte_cryptodev_info_get (cdev_id, &cdev_info); + + if (! 
+ (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + continue; + + n_qs += cdev_info.max_nb_queue_pairs; + } + + if (n_qs >= n_req_qs) + return 0; + else + return -1; +} + +static clib_error_t * +dpdk_ipsec_init (vlib_main_t * vm) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + struct rte_cryptodev_config dev_conf; + struct rte_cryptodev_qp_conf qp_conf; + struct rte_cryptodev_info cdev_info; + struct rte_mempool *rmp; + i32 dev_id, ret; + u32 i, skip_master; + + if (check_cryptodev_queues () < 0) + return clib_error_return (0, "not enough cryptodevs for ipsec"); + + vec_alloc (dcm->workers_main, tm->n_vlib_mains); + _vec_len (dcm->workers_main) = tm->n_vlib_mains; + + fprintf (stdout, "DPDK Cryptodevs info:\n"); + fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n"); + /* HW cryptodevs have higher dev_id, use HW first */ + for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--) + { + u16 max_nb_qp, qp = 0; + skip_master = vlib_num_workers () > 0; + + rte_cryptodev_info_get (dev_id, &cdev_info); + + if (! + (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + continue; + + max_nb_qp = cdev_info.max_nb_queue_pairs; + + for (i = 0; i < tm->n_vlib_mains; i++) + { + u8 is_outbound; + crypto_worker_main_t *cwm; + uword *map; + + if (skip_master) + { + skip_master = 0; + continue; + } + + cwm = vec_elt_at_index (dcm->workers_main, i); + map = cwm->algo_qp_map; + + if (!map) + { + map = hash_create (0, sizeof (crypto_worker_qp_key_t)); + if (!map) + return clib_error_return (0, "unable to create hash table " + "for worker %u", + vlib_mains[i]->cpu_index); + cwm->algo_qp_map = map; + } + + for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp; + is_outbound++) + { + int mapped = add_cdev_mapping (cwm, &cdev_info, + dev_id, qp, is_outbound); + if (mapped > 0) + qp++; + + if (mapped < 0) + return clib_error_return (0, + "too many queues for one worker"); + } + } + + if (qp == 0) + continue; + + dev_conf.socket_id = rte_cryptodev_socket_id (dev_id); + dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs; + dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_OBJS; + dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE; + + ret = rte_cryptodev_configure (dev_id, &dev_conf); + if (ret < 0) + return clib_error_return (0, "cryptodev %u config error", dev_id); + + qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; + for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) + { + ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, + dev_conf.socket_id); + if (ret < 0) + return clib_error_return (0, "cryptodev %u qp %u setup error", + dev_id, qp); + } + fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs, + DPDK_CRYPTO_NB_OBJS, DPDK_CRYPTO_CACHE_SIZE); + } + + u32 socket_id = rte_socket_id (); + + vec_validate_aligned (dcm->cop_pools, socket_id, CLIB_CACHE_LINE_BYTES); + + /* pool already exists, nothing to do */ + if (dcm->cop_pools[socket_id]) + return 0; + + u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", socket_id, 0); + + rmp = rte_crypto_op_pool_create ((char *) pool_name, + RTE_CRYPTO_OP_TYPE_SYMMETRIC, + DPDK_CRYPTO_NB_COPS * + (1 + vlib_num_workers ()), + DPDK_CRYPTO_CACHE_SIZE, + DPDK_CRYPTO_PRIV_SIZE, socket_id); + vec_free (pool_name); + + if (!rmp) + return clib_error_return (0, "failed to allocate mempool on socket %u", + socket_id); + dcm->cop_pools[socket_id] = rmp; + + dpdk_esp_init (); + + if (vec_len (vlib_mains) == 0) + vlib_node_set_state (&vlib_global_main, 
dpdk_crypto_input_node.index, + VLIB_NODE_STATE_POLLING); + else + for (i = 1; i < tm->n_vlib_mains; i++) + vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index, + VLIB_NODE_STATE_POLLING); + + return 0; +} + +VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/devices/dpdk/ipsec/ipsec.h b/vnet/vnet/devices/dpdk/ipsec/ipsec.h new file mode 100644 index 00000000..e103655c --- /dev/null +++ b/vnet/vnet/devices/dpdk/ipsec/ipsec.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __DPDK_IPSEC_H__ +#define __DPDK_IPSEC_H__ + +#include + +#undef always_inline +#include +#include + +#if CLIB_DEBUG > 0 +#define always_inline static inline +#else +#define always_inline static inline __attribute__ ((__always_inline__)) +#endif + + +#define MAX_QP_PER_LCORE 16 + +typedef struct +{ + u32 iv[4]; + u8 icv[64]; +} dpdk_cop_priv_t; + +typedef struct +{ + u8 cipher_algo; + u8 auth_algo; + u8 is_outbound; +} crypto_worker_qp_key_t; + +typedef struct +{ + u16 dev_id; + u16 qp_id; + u16 is_outbound; + i16 inflights; + u32 bi[VLIB_FRAME_SIZE]; + struct rte_crypto_op *cops[VLIB_FRAME_SIZE]; + struct rte_crypto_op **free_cops; +} crypto_qp_data_t; + +typedef struct +{ + u8 qp_index; + void *sess; +} crypto_sa_session_t; + +typedef struct +{ + crypto_sa_session_t *sa_sess_d[2]; + crypto_qp_data_t *qp_data; + uword *algo_qp_map; +} crypto_worker_main_t; + +typedef struct +{ + struct rte_mempool **cop_pools; + crypto_worker_main_t *workers_main; +} dpdk_crypto_main_t; + +dpdk_crypto_main_t dpdk_crypto_main; + +extern vlib_node_registration_t dpdk_crypto_input_node; + +#define CRYPTO_N_FREE_COPS (VLIB_FRAME_SIZE * 3) + +static_always_inline void +crypto_alloc_cops () +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + u32 cpu_index = os_get_cpu_number (); + crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + unsigned socket_id = rte_socket_id (); + crypto_qp_data_t *qpd; + + /* *INDENT-OFF* */ + vec_foreach (qpd, cwm->qp_data) + { + u32 l = vec_len (qpd->free_cops); + + if (PREDICT_FALSE (l < VLIB_FRAME_SIZE)) + { + u32 n_alloc; + + if (PREDICT_FALSE (!qpd->free_cops)) + vec_alloc (qpd->free_cops, CRYPTO_N_FREE_COPS); + + n_alloc = rte_crypto_op_bulk_alloc (dcm->cop_pools[socket_id], + RTE_CRYPTO_OP_TYPE_SYMMETRIC, + &qpd->free_cops[l], + CRYPTO_N_FREE_COPS - l - 1); + + _vec_len (qpd->free_cops) = l + n_alloc; + } + } + /* *INDENT-ON* */ +} + +static_always_inline void +crypto_free_cop (crypto_qp_data_t * qpd, struct rte_crypto_op **cops, u32 n) +{ + u32 l = vec_len (qpd->free_cops); + + if (l + n >= CRYPTO_N_FREE_COPS) + { + l -= VLIB_FRAME_SIZE; + rte_mempool_put_bulk (cops[0]->mempool, + (void **) &qpd->free_cops[l], VLIB_FRAME_SIZE); + } + clib_memcpy (&qpd->free_cops[l], cops, sizeof (*cops) * n); + + _vec_len (qpd->free_cops) = l + n; +} + +static_always_inline 
int +check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, + char *name) +{ + struct + { + uint8_t cipher_algo; + enum rte_crypto_sym_xform_type type; + union + { + enum rte_crypto_auth_algorithm auth; + enum rte_crypto_cipher_algorithm cipher; + }; + char *name; + } supported_algo[] = + { + { + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = + RTE_CRYPTO_CIPHER_NULL,.name = "NULL"}, + { + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = + RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"}, + { + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = + RTE_CRYPTO_CIPHER_AES_CTR,.name = "AES_CTR"}, + { + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = + RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"}, + { + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.auth = + RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_SHA256_HMAC,.name = "HMAC-SHA256"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_SHA384_HMAC,.name = "HMAC-SHA384"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_AES_XCBC_MAC,.name = "AES-XCBC-MAC"}, + { + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"}, + { + /* tail */ + .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},}; + uint32_t i = 0; + + if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + return -1; + + while (supported_algo[i].type != RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED) + { + if (cap->sym.xform_type == supported_algo[i].type) + { + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER && + cap->sym.cipher.algo == supported_algo[i].cipher) || + (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH && + cap->sym.auth.algo == supported_algo[i].auth)) + { + if (name) + strcpy (name, supported_algo[i].name); + return 0; + } + } + + i++; + } + + return -1; +} + +#endif /* __DPDK_IPSEC_H__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/ipsec-gre/interface.c b/vnet/vnet/ipsec-gre/interface.c index fae79b97..56832ee1 100644 --- a/vnet/vnet/ipsec-gre/interface.c +++ b/vnet/vnet/ipsec-gre/interface.c @@ -27,7 +27,14 @@ #include #include #include + +#if DPDK_CRYPTO==1 +#include +#define ESP_NODE "dpdk-esp-encrypt" +#else #include +#define ESP_NODE "esp-encrypt" +#endif u8 * format_ipsec_gre_tunnel (u8 * s, va_list * args) @@ -186,7 +193,7 @@ vnet_ipsec_gre_add_del_tunnel (vnet_ipsec_gre_add_del_tunnel_args_t * a, hash_set (igm->tunnel_by_key, key, t - igm->tunnels); slot = vlib_node_add_named_next_with_slot - (vnm->vlib_main, hi->tx_node_index, "esp-encrypt", + (vnm->vlib_main, hi->tx_node_index, ESP_NODE, IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT); ASSERT (slot == IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT); diff --git a/vnet/vnet/ipsec/esp.h b/vnet/vnet/ipsec/esp.h index b9feacbb..50cac806 100644 --- a/vnet/vnet/ipsec/esp.h +++ b/vnet/vnet/ipsec/esp.h @@ -12,6 +12,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +#ifndef __ESP_H__ +#define __ESP_H__ #include #include @@ -77,6 +79,154 @@ typedef struct esp_main_t esp_main; +#define ESP_WINDOW_SIZE (64) +#define ESP_SEQ_MAX (4294967295UL) + + +always_inline int +esp_replay_check (ipsec_sa_t * sa, u32 seq) +{ + u32 diff; + + if (PREDICT_TRUE (seq > sa->last_seq)) + return 0; + + diff = sa->last_seq - seq; + + if (ESP_WINDOW_SIZE > diff) + return (sa->replay_window & (1ULL << diff)) ? 1 : 0; + else + return 1; + + return 0; +} + +always_inline int +esp_replay_check_esn (ipsec_sa_t * sa, u32 seq) +{ + u32 tl = sa->last_seq; + u32 th = sa->last_seq_hi; + u32 diff = tl - seq; + + if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1))) + { + if (seq >= (tl - ESP_WINDOW_SIZE + 1)) + { + sa->seq_hi = th; + if (seq <= tl) + return (sa->replay_window & (1ULL << diff)) ? 1 : 0; + else + return 0; + } + else + { + sa->seq_hi = th + 1; + return 0; + } + } + else + { + if (seq >= (tl - ESP_WINDOW_SIZE + 1)) + { + sa->seq_hi = th - 1; + return (sa->replay_window & (1ULL << diff)) ? 1 : 0; + } + else + { + sa->seq_hi = th; + if (seq <= tl) + return (sa->replay_window & (1ULL << diff)) ? 1 : 0; + else + return 0; + } + } + + return 0; +} + +/* TODO seq increment should be atomic to be accessed by multiple workers */ +always_inline void +esp_replay_advance (ipsec_sa_t * sa, u32 seq) +{ + u32 pos; + + if (seq > sa->last_seq) + { + pos = seq - sa->last_seq; + if (pos < ESP_WINDOW_SIZE) + sa->replay_window = ((sa->replay_window) << pos) | 1; + else + sa->replay_window = 1; + sa->last_seq = seq; + } + else + { + pos = sa->last_seq - seq; + sa->replay_window |= (1ULL << pos); + } +} + +always_inline void +esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq) +{ + int wrap = sa->seq_hi - sa->last_seq_hi; + u32 pos; + + if (wrap == 0 && seq > sa->last_seq) + { + pos = seq - sa->last_seq; + if (pos < ESP_WINDOW_SIZE) + sa->replay_window = ((sa->replay_window) << pos) | 1; + else + sa->replay_window = 1; + sa->last_seq = seq; + } + else if (wrap > 0) + { + pos = ~seq + sa->last_seq + 1; + if (pos < ESP_WINDOW_SIZE) + sa->replay_window = ((sa->replay_window) << pos) | 1; + else + sa->replay_window = 1; + sa->last_seq = seq; + sa->last_seq_hi = sa->seq_hi; + } + else if (wrap < 0) + { + pos = ~seq + sa->last_seq + 1; + sa->replay_window |= (1ULL << pos); + } + else + { + pos = sa->last_seq - seq; + sa->replay_window |= (1ULL << pos); + } +} + +always_inline int +esp_seq_advance (ipsec_sa_t * sa) +{ + if (PREDICT_TRUE (sa->use_esn)) + { + if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX)) + { + if (PREDICT_FALSE + (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX)) + return 1; + sa->seq_hi++; + } + sa->seq++; + } + else + { + if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX)) + return 1; + sa->seq++; + } + + return 0; +} + always_inline void esp_init () { @@ -159,6 +309,7 @@ hmac_calc (ipsec_integ_alg_t alg, return em->esp_integ_algs[alg].trunc_size; } +#endif /* __ESP_H__ */ /* * fd.io coding-style-patch-verification: ON diff --git a/vnet/vnet/ipsec/esp_decrypt.c b/vnet/vnet/ipsec/esp_decrypt.c index 07a5edac..e69cd851 100644 --- a/vnet/vnet/ipsec/esp_decrypt.c +++ b/vnet/vnet/ipsec/esp_decrypt.c @@ -22,8 +22,6 @@ #include #include -#define ESP_WINDOW_SIZE 64 - #define foreach_esp_decrypt_next \ _(DROP, "error-drop") \ _(IP4_INPUT, "ip4-input") \ @@ -109,125 +107,6 @@ esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg, EVP_DecryptFinal_ex (ctx, out + out_len, &out_len); } -always_inline int -esp_replay_check (ipsec_sa_t * sa, u32 seq) -{ - u32 diff; - - if (PREDICT_TRUE (seq > 
sa->last_seq)) - return 0; - - diff = sa->last_seq - seq; - - if (ESP_WINDOW_SIZE > diff) - return (sa->replay_window & (1ULL << diff)) ? 1 : 0; - else - return 1; - - return 0; -} - -always_inline int -esp_replay_check_esn (ipsec_sa_t * sa, u32 seq) -{ - u32 tl = sa->last_seq; - u32 th = sa->last_seq_hi; - u32 diff = tl - seq; - - if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1))) - { - if (seq >= (tl - ESP_WINDOW_SIZE + 1)) - { - sa->seq_hi = th; - if (seq <= tl) - return (sa->replay_window & (1ULL << diff)) ? 1 : 0; - else - return 0; - } - else - { - sa->seq_hi = th + 1; - return 0; - } - } - else - { - if (seq >= (tl - ESP_WINDOW_SIZE + 1)) - { - sa->seq_hi = th - 1; - return (sa->replay_window & (1ULL << diff)) ? 1 : 0; - } - else - { - sa->seq_hi = th; - if (seq <= tl) - return (sa->replay_window & (1ULL << diff)) ? 1 : 0; - else - return 0; - } - } - - return 0; -} - -always_inline void -esp_replay_advance (ipsec_sa_t * sa, u32 seq) -{ - u32 pos; - - if (seq > sa->last_seq) - { - pos = seq - sa->last_seq; - if (pos < ESP_WINDOW_SIZE) - sa->replay_window = ((sa->replay_window) << pos) | 1; - else - sa->replay_window = 1; - sa->last_seq = seq; - } - else - { - pos = sa->last_seq - seq; - sa->replay_window |= (1ULL << pos); - } -} - -always_inline void -esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq) -{ - int wrap = sa->seq_hi - sa->last_seq_hi; - u32 pos; - - if (wrap == 0 && seq > sa->last_seq) - { - pos = seq - sa->last_seq; - if (pos < ESP_WINDOW_SIZE) - sa->replay_window = ((sa->replay_window) << pos) | 1; - else - sa->replay_window = 1; - sa->last_seq = seq; - } - else if (wrap > 0) - { - pos = ~seq + sa->last_seq + 1; - if (pos < ESP_WINDOW_SIZE) - sa->replay_window = ((sa->replay_window) << pos) | 1; - else - sa->replay_window = 1; - sa->last_seq = seq; - sa->last_seq_hi = sa->seq_hi; - } - else if (wrap < 0) - { - pos = ~seq + sa->last_seq + 1; - sa->replay_window |= (1ULL << pos); - } - else - { - pos = sa->last_seq - seq; - sa->replay_window |= (1ULL << pos); - } -} - static uword esp_decrypt_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) diff --git a/vnet/vnet/ipsec/esp_encrypt.c b/vnet/vnet/ipsec/esp_encrypt.c index b7186e3d..b947611e 100644 --- a/vnet/vnet/ipsec/esp_encrypt.c +++ b/vnet/vnet/ipsec/esp_encrypt.c @@ -22,7 +22,6 @@ #include #include -#define ESP_SEQ_MAX (4294967295UL) #define foreach_esp_encrypt_next \ _(DROP, "error-drop") \ @@ -111,30 +110,6 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, EVP_EncryptFinal_ex (ctx, out + out_len, &out_len); } -always_inline int -esp_seq_advance (ipsec_sa_t * sa) -{ - if (PREDICT_TRUE (sa->use_esn)) - { - if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX)) - { - if (PREDICT_FALSE - (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX)) - return 1; - sa->seq_hi++; - } - sa->seq++; - } - else - { - if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX)) - return 1; - sa->seq++; - } - - return 0; -} - static uword esp_encrypt_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) diff --git a/vnet/vnet/ipsec/ipsec.c b/vnet/vnet/ipsec/ipsec.c index 223440ec..ee85c402 100644 --- a/vnet/vnet/ipsec/ipsec.c +++ b/vnet/vnet/ipsec/ipsec.c @@ -21,9 +21,25 @@ #include #include -#include #include +#if DPDK_CRYPTO==1 +#include +#define ESP_NODE "dpdk-esp-encrypt" +#else +#include +#define ESP_NODE "esp-encrypt" +#endif + +#if DPDK_CRYPTO==0 +/* dummy function */ +static int +add_del_sa_sess (u32 sa_index, u8 is_add) +{ + return 0; +} +#endif + u32 ipsec_get_sa_index_by_sa_id 
(u32 sa_id) { @@ -433,6 +449,7 @@ ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add) return VNET_API_ERROR_SYSCALL_ERROR_1; /* sa used in policy */ } hash_unset (im->sa_index_by_sa_id, sa->id); + add_del_sa_sess (sa_index, is_add); pool_put (im->sad, sa); } else /* create new SA */ @@ -441,6 +458,8 @@ ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add) clib_memcpy (sa, new_sa, sizeof (*sa)); sa_index = sa - im->sad; hash_set (im->sa_index_by_sa_id, sa->id, sa_index); + if (add_del_sa_sess (sa_index, is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; } return 0; } @@ -476,6 +495,12 @@ ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update) sa->integ_key_len = sa_update->integ_key_len; } + if (sa->crypto_key_len + sa->integ_key_len > 0) + { + if (add_del_sa_sess (sa_index, 0) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; + } + return 0; } @@ -522,7 +547,8 @@ ipsec_init (vlib_main_t * vm) ASSERT (node); im->error_drop_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "esp-encrypt"); + node = vlib_get_node_by_name (vm, (u8 *) ESP_NODE); + ASSERT (node); im->esp_encrypt_node_index = node->index; @@ -530,7 +556,6 @@ ipsec_init (vlib_main_t * vm) ASSERT (node); im->ip4_lookup_node_index = node->index; - if ((error = vlib_call_init_function (vm, ipsec_cli_init))) return error; diff --git a/vnet/vnet/ipsec/ipsec.h b/vnet/vnet/ipsec/ipsec.h index d33df383..65d7bad3 100644 --- a/vnet/vnet/ipsec/ipsec.h +++ b/vnet/vnet/ipsec/ipsec.h @@ -12,6 +12,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +#ifndef __IPSEC_H__ +#define __IPSEC_H__ + #define IPSEC_FLAG_IPSEC_GRE_TUNNEL (1 << 0) #define foreach_ipsec_policy_action \ @@ -307,6 +310,8 @@ get_next_output_feature_node_index (vlib_buffer_t * b, return node->next_nodes[next]; } +#endif /* __IPSEC_H__ */ + /* * fd.io coding-style-patch-verification: ON * diff --git a/vnet/vnet/ipsec/ipsec_cli.c b/vnet/vnet/ipsec/ipsec_cli.c index 785e040b..8920924d 100644 --- a/vnet/vnet/ipsec/ipsec_cli.c +++ b/vnet/vnet/ipsec/ipsec_cli.c @@ -768,7 +768,6 @@ VLIB_CLI_COMMAND (set_interface_key_command, static) = { }; /* *INDENT-ON* */ - clib_error_t * ipsec_cli_init (vlib_main_t * vm) { diff --git a/vnet/vnet/ipsec/ipsec_if.c b/vnet/vnet/ipsec/ipsec_if.c index 77d5d19a..a8da046f 100644 --- a/vnet/vnet/ipsec/ipsec_if.c +++ b/vnet/vnet/ipsec/ipsec_if.c @@ -20,6 +20,20 @@ #include #include +#if DPDK_CRYPTO==1 +#include +#else +#include +#endif + +#if DPDK_CRYPTO==0 +/* dummy function */ +static int +add_del_sa_sess (u32 sa_index, u8 is_add) +{ + return 0; +} +#endif void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); @@ -124,6 +138,8 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, args->remote_crypto_key_len); } + add_del_sa_sess (t->input_sa_index, args->is_add); + pool_get (im->sad, sa); memset (sa, 0, sizeof (*sa)); t->output_sa_index = sa - im->sad; @@ -149,6 +165,8 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, args->local_crypto_key_len); } + add_del_sa_sess (t->output_sa_index, args->is_add); + hash_set (im->ipsec_if_pool_index_by_key, key, t - im->tunnel_interfaces); @@ -192,8 +210,17 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, /* delete input and output SA */ sa = pool_elt_at_index (im->sad, t->input_sa_index); + + if (add_del_sa_sess (t->input_sa_index, args->is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; + pool_put (im->sad, sa); + sa = pool_elt_at_index (im->sad, t->output_sa_index); + + 
if (add_del_sa_sess (t->output_sa_index, args->is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; + pool_put (im->sad, sa); hash_unset (im->ipsec_if_pool_index_by_key, key); @@ -282,6 +309,9 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->crypto_alg = alg; sa->crypto_key_len = vec_len (key); clib_memcpy (sa->crypto_key, key, vec_len (key)); + + if (add_del_sa_sess (t->input_sa_index, 0) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_LOCAL_INTEG) { @@ -289,6 +319,9 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->integ_alg = alg; sa->integ_key_len = vec_len (key); clib_memcpy (sa->integ_key, key, vec_len (key)); + + if (add_del_sa_sess (t->output_sa_index, 0) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_CRYPTO) { @@ -296,6 +329,9 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->crypto_alg = alg; sa->crypto_key_len = vec_len (key); clib_memcpy (sa->crypto_key, key, vec_len (key)); + + if (add_del_sa_sess (t->input_sa_index, 0) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_INTEG) { @@ -303,6 +339,9 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->integ_alg = alg; sa->integ_key_len = vec_len (key); clib_memcpy (sa->integ_key, key, vec_len (key)); + + if (add_del_sa_sess (t->output_sa_index, 0) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; } else return VNET_API_ERROR_INVALID_VALUE; diff --git a/vnet/vnet/ipsec/ipsec_if_in.c b/vnet/vnet/ipsec/ipsec_if_in.c index 93cedce2..db75ab92 100644 --- a/vnet/vnet/ipsec/ipsec_if_in.c +++ b/vnet/vnet/ipsec/ipsec_if_in.c @@ -22,6 +22,12 @@ #include #include +#if DPDK_CRYPTO==1 +#define ESP_NODE "dpdk-esp-decrypt" +#else +#define ESP_NODE "esp-decrypt" +#endif + /* Statistics (not really errors) */ #define foreach_ipsec_if_input_error \ _(RX, "good packets received") @@ -153,8 +159,8 @@ VLIB_REGISTER_NODE (ipsec_if_input_node) = { .n_next_nodes = IPSEC_IF_INPUT_N_NEXT, .next_nodes = { - [IPSEC_IF_INPUT_NEXT_ESP_DECRYPT] = "esp-decrypt", - [IPSEC_IF_INPUT_NEXT_DROP] = "error-drop", + [IPSEC_IF_INPUT_NEXT_ESP_DECRYPT] = ESP_NODE, + [IPSEC_IF_INPUT_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */ diff --git a/vnet/vnet/ipsec/ipsec_if_out.c b/vnet/vnet/ipsec/ipsec_if_out.c index a605874e..8f062828 100644 --- a/vnet/vnet/ipsec/ipsec_if_out.c +++ b/vnet/vnet/ipsec/ipsec_if_out.c @@ -21,6 +21,11 @@ #include +#if DPDK_CRYPTO==1 +#define ESP_NODE "dpdk-esp-encrypt" +#else +#define ESP_NODE "esp-encrypt" +#endif /* Statistics (not really errors) */ #define foreach_ipsec_if_output_error \ @@ -140,7 +145,7 @@ VLIB_REGISTER_NODE (ipsec_if_output_node) = { .n_next_nodes = IPSEC_IF_OUTPUT_N_NEXT, .next_nodes = { - [IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT] = "esp-encrypt", + [IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT] = ESP_NODE, [IPSEC_IF_OUTPUT_NEXT_DROP] = "error-drop", }, }; diff --git a/vnet/vnet/ipsec/ipsec_input.c b/vnet/vnet/ipsec/ipsec_input.c index 8360a1d5..4662c1a1 100644 --- a/vnet/vnet/ipsec/ipsec_input.c +++ b/vnet/vnet/ipsec/ipsec_input.c @@ -23,9 +23,15 @@ #include #include +#if DPDK_CRYPTO==1 +#define ESP_NODE "dpdk-esp-decrypt" +#else +#define ESP_NODE "esp-decrypt" +#endif + #define foreach_ipsec_input_next \ _(DROP, "error-drop") \ -_(ESP_DECRYPT, "esp-decrypt") +_(ESP_DECRYPT, ESP_NODE) #define _(v, s) IPSEC_INPUT_NEXT_##v, typedef enum diff --git a/vnet/vnet/ipsec/ipsec_output.c b/vnet/vnet/ipsec/ipsec_output.c index 3810520d..97977899 100644 --- 
a/vnet/vnet/ipsec/ipsec_output.c +++ b/vnet/vnet/ipsec/ipsec_output.c @@ -21,11 +21,17 @@ #include +#if DPDK_CRYPTO==1 +#define ESP_NODE "dpdk-esp-encrypt" +#else +#define ESP_NODE "esp-encrypt" +#endif + #if IPSEC > 0 #define foreach_ipsec_output_next \ _(DROP, "error-drop") \ -_(ESP_ENCRYPT, "esp-encrypt") +_(ESP_ENCRYPT, ESP_NODE) #define _(v, s) IPSEC_OUTPUT_NEXT_##v, typedef enum diff --git a/vpp/Makefile.am b/vpp/Makefile.am index 52ce0655..214cc743 100644 --- a/vpp/Makefile.am +++ b/vpp/Makefile.am @@ -13,7 +13,7 @@ AUTOMAKE_OPTIONS = foreign subdir-objects -AM_CFLAGS = -Wall @DPDK@ @IPSEC@ @VCGN@ @IPV6SR@ +AM_CFLAGS = -Wall @DPDK@ @DPDK_CRYPTO@ @IPSEC@ @VCGN@ @IPV6SR@ noinst_PROGRAMS = BUILT_SOURCES = @@ -110,6 +110,9 @@ else vpp_LDFLAGS += -l:libdpdk.a endif vpp_LDFLAGS += -Wl,--no-whole-archive +if WITH_DPDK_CRYPTO +vpp_LDADD += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB +endif endif vpp_LDADD += -lvppinfra -lm -lpthread -ldl diff --git a/vpp/configure.ac b/vpp/configure.ac index 0bc13e63..d36695cc 100644 --- a/vpp/configure.ac +++ b/vpp/configure.ac @@ -9,6 +9,11 @@ AC_ARG_WITH(dpdk, [with_dpdk=1], [with_dpdk=0]) +AC_ARG_WITH(dpdk_crypto, + AC_HELP_STRING([--with-dpdk-crypto],[Use DPDK cryptodev]), + [with_dpdk_crypto=1], + [with_dpdk_crypto=0]) + AC_ARG_ENABLE(dpdk-shared, AC_HELP_STRING([--enable-dpdk-shared],[Link with DPDK shared lib]), [enable_dpdk_shared=1], @@ -32,6 +37,9 @@ AC_ARG_WITH(ipv6sr, AM_CONDITIONAL(WITH_DPDK, test "$with_dpdk" = "1") AM_CONDITIONAL(ENABLE_DPDK_SHARED, test "$enable_dpdk_shared" = "1") AC_SUBST(DPDK,["-DDPDK=${with_dpdk} -DDPDK_SHARED_LIB=${enable_dpdk_shared}"]) +AM_CONDITIONAL(WITH_DPDK_CRYPTO, test "$with_dpdk_crypto" = "1") +AC_SUBST(DPDK_CRYPTO,[-DDPDK_CRYPTO=${with_dpdk_crypto}]) + AM_COND_IF( [ENABLE_DPDK_SHARED], -- cgit 1.2.3-korg From 696f1adec0df3b8f161862566dd9c86174302658 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Fri, 23 Dec 2016 22:42:41 +0100 Subject: dpdk: Add support for Mellanox ConnectX-4 devices Due to external library dependency support for Mellanox devices is disabled. To enable it uncoment following line: vpp_uses_dpdk_mlx5_pmd = yes in build-data/platforms/vpp.mk and install OFED libraries. 
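As a rough illustration of the enablement steps described above (a sketch only, based on the build-data/platforms/vpp.mk knob and the --with-dpdk-mlx5-pmd configure flag that this patch introduces, and assuming the OFED/libibverbs libraries are already installed on the build host):

    # build-data/platforms/vpp.mk -- uncomment to build the Mellanox MLX5 PMD
    vpp_uses_dpdk_mlx5_pmd = yes

    # With the knob set, the platform makefiles pass DPDK_MLX5_PMD=y to dpdk/Makefile,
    # which turns on RTE_LIBRTE_MLX5_PMD in the generated DPDK custom config, and
    # configure is run with --with-dpdk-mlx5-pmd so vpp links -libverbs -lmlx5 -lnuma.
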
Change-Id: I131d52b5d449a958349f31f9cc04311948f78b71 Signed-off-by: Damjan Marion --- build-data/packages/dpdk.mk | 5 +++ build-data/packages/vnet.mk | 3 ++ build-data/packages/vpp.mk | 3 ++ build-data/platforms/vpp.mk | 1 + dpdk/Makefile | 4 +- vnet/vnet/devices/dpdk/dpdk.h | 2 + vnet/vnet/devices/dpdk/dpdk_priv.h | 9 +--- vnet/vnet/devices/dpdk/format.c | 8 ++++ vnet/vnet/devices/dpdk/init.c | 86 ++++++++++++++++++++++++++------------ vpp/Makefile.am | 3 ++ vpp/configure.ac | 7 +++- 11 files changed, 96 insertions(+), 35 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index a529e365..2dcf4093 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -27,6 +27,11 @@ ifneq ($(DPDK_CRYPTO_PMD),) DPDK_MAKE_ARGS += DPDK_CRYPTO_PMD=y endif +DPDK_MLX5_PMD=$(strip $($(PLATFORM)_uses_dpdk_mlx5_pmd)) +ifneq ($(DPDK_MLX5_PMD),) +DPDK_MAKE_ARGS += DPDK_MLX5_PMD=y +endif + DPDK_PLATFORM_TARGET=$(strip $($(PLATFORM)_dpdk_target)) ifneq ($(DPDK_PLATFORM_TARGET),) DPDK_MAKE_ARGS += DPDK_TARGET=$(DPDK_PLATFORM_TARGET) diff --git a/build-data/packages/vnet.mk b/build-data/packages/vnet.mk index 399ca1b4..57c444e8 100644 --- a/build-data/packages/vnet.mk +++ b/build-data/packages/vnet.mk @@ -41,4 +41,7 @@ endif ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) vnet_configure_args += --with-dpdk-crypto endif +ifeq ($($(PLATFORM)_uses_dpdk_mlx5_pmd),yes) +vnet_configure_args += --with-dpdk-mlx5-pmd +endif endif diff --git a/build-data/packages/vpp.mk b/build-data/packages/vpp.mk index 6831c6b8..a3d60528 100644 --- a/build-data/packages/vpp.mk +++ b/build-data/packages/vpp.mk @@ -48,4 +48,7 @@ endif ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) vpp_configure_args += --with-dpdk-crypto endif +ifeq ($($(PLATFORM)_uses_dpdk_mlx5_pmd),yes) +vpp_configure_args += --with-dpdk-mlx5-pmd +endif endif diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 15d4dc39..97ddc57d 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -42,6 +42,7 @@ plugins_configure_args_vpp = --with-dpdk # DPDK configuration parameters # vpp_uses_dpdk_cryptodev = yes +# vpp_uses_dpdk_mlx5_pmd = yes # vpp_uses_external_dpdk = yes # vpp_dpdk_inc_dir = /usr/include/dpdk # vpp_dpdk_lib_dir = /usr/lib diff --git a/dpdk/Makefile b/dpdk/Makefile index 46cc3db1..178c30c6 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -21,7 +21,8 @@ DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_MARCH ?= native DPDK_TUNE ?= generic DPDK_DEBUG ?= n -DPDK_CRYPTO_PMD ?= n +DPDK_CRYPTO_PMD ?= n +DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) @@ -122,6 +123,7 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_PMD)) $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_PMD)) $(call set,RTE_LIBRTE_PMD_QAT,$(DPDK_CRYPTO_PMD)) + $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed $(call set,RTE_LIBRTE_TIMER,n) $(call set,RTE_LIBRTE_CFGFILE,n) diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h index 3669bc5f..d8f378d2 100644 --- a/vnet/vnet/devices/dpdk/dpdk.h +++ b/vnet/vnet/devices/dpdk/dpdk.h @@ -83,6 +83,7 @@ extern vlib_node_registration_t handoff_dispatch_node; _ ("rte_bond_pmd", BOND) \ _ ("net_fm10k", FM10K) \ _ ("net_cxgbe", CXGBE) \ + _ ("net_mlx5", MLX5) \ _ ("net_dpaa2", DPAA2) #else #define foreach_dpdk_pmd \ @@ -118,6 +119,7 @@ typedef enum VNET_DPDK_PORT_TYPE_ETH_1G, VNET_DPDK_PORT_TYPE_ETH_10G, VNET_DPDK_PORT_TYPE_ETH_40G, + 
VNET_DPDK_PORT_TYPE_ETH_100G, VNET_DPDK_PORT_TYPE_ETH_BOND, VNET_DPDK_PORT_TYPE_ETH_SWITCH, VNET_DPDK_PORT_TYPE_AF_PACKET, diff --git a/vnet/vnet/devices/dpdk/dpdk_priv.h b/vnet/vnet/devices/dpdk/dpdk_priv.h index eb13df2c..0c81dbc3 100644 --- a/vnet/vnet/devices/dpdk/dpdk_priv.h +++ b/vnet/vnet/devices/dpdk/dpdk_priv.h @@ -13,15 +13,10 @@ * limitations under the License. */ -#define DPDK_NB_RX_DESC_DEFAULT 512 -#define DPDK_NB_TX_DESC_DEFAULT 512 +#define DPDK_NB_RX_DESC_DEFAULT 1024 +#define DPDK_NB_TX_DESC_DEFAULT 1024 #define DPDK_NB_RX_DESC_VIRTIO 256 #define DPDK_NB_TX_DESC_VIRTIO 256 -#define DPDK_NB_RX_DESC_10GE 1024 -#define DPDK_NB_TX_DESC_10GE 1024 -#define DPDK_NB_RX_DESC_40GE 1024 -#define DPDK_NB_TX_DESC_40GE 1024 -#define DPDK_NB_RX_DESC_ENIC 1024 #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QSFP_A 0x1583 diff --git a/vnet/vnet/devices/dpdk/format.c b/vnet/vnet/devices/dpdk/format.c index 0b8a6939..ff7c7a5a 100644 --- a/vnet/vnet/devices/dpdk/format.c +++ b/vnet/vnet/devices/dpdk/format.c @@ -184,6 +184,10 @@ format_dpdk_device_name (u8 * s, va_list * args) device_name = "FortyGigabitEthernet"; break; + case VNET_DPDK_PORT_TYPE_ETH_100G: + device_name = "HundredGigabitEthernet"; + break; + case VNET_DPDK_PORT_TYPE_ETH_BOND: return format (s, "BondEthernet%d", dm->devices[i].device_index); @@ -268,6 +272,10 @@ format_dpdk_device_type (u8 * s, va_list * args) dev_type = "Chelsio T4/T5"; break; + case VNET_DPDK_PMD_MLX5: + dev_type = "Mellanox ConnectX-4 Family"; + break; + case VNET_DPDK_PMD_VMXNET3: dev_type = "VMware VMXNET3"; break; diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c index 0448c15f..693ca985 100755 --- a/vnet/vnet/devices/dpdk/init.c +++ b/vnet/vnet/devices/dpdk/init.c @@ -293,11 +293,6 @@ dpdk_lib_init (dpdk_main_t * dm) vec_validate_aligned (dm->hqos_threads, tm->n_vlib_mains - 1, CLIB_CACHE_LINE_BYTES); -#ifdef NETMAP - if (rte_netmap_probe () < 0) - return clib_error_return (0, "rte netmap probe failed"); -#endif - nports = rte_eth_dev_count (); if (nports < 1) { @@ -448,6 +443,9 @@ dpdk_lib_init (dpdk_main_t * dm) else xd->pmd = VNET_DPDK_PMD_UNKNOWN; + xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; + xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT; + xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT; switch (xd->pmd) { @@ -463,8 +461,6 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_IXGBEVF: case VNET_DPDK_PMD_THUNDERX: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; - xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE; break; case VNET_DPDK_PMD_DPAA2: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; @@ -474,17 +470,10 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_ENIC: rte_eth_link_get_nowait (i, &l); xd->flags |= DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE; - xd->nb_rx_desc = DPDK_NB_RX_DESC_ENIC; if (l.link_speed == 40000) - { - xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; - xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE; - } + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; else - { - xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; - xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE; - } + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; break; /* Intel Fortville */ @@ -492,8 +481,6 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_I40EVF: xd->flags |= DPDK_DEVICE_FLAG_PMD_SUPPORTS_PTYPE; xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; - xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE; switch (dev_info.pci_dev->id.device_id) { @@ -521,27 +508,74 @@ dpdk_lib_init (dpdk_main_t * dm) { case 0x540d: /* 
T580-CR */ case 0x5410: /* T580-LP-cr */ - xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE; xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; break; case 0x5403: /* T540-CR */ - xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE; xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; break; default: - xd->nb_rx_desc = DPDK_NB_RX_DESC_10GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_10GE; xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; } break; + case VNET_DPDK_PMD_MLX5: + { + char *pn_100g[] = { "MCX415A-CCAT", "MCX416A-CCAT", 0 }; + char *pn_40g[] = { "MCX413A-BCAT", "MCX414A-BCAT", + "MCX415A-BCAT", "MCX416A-BCAT", "MCX4131A-BCAT", 0 + }; + char *pn_10g[] = { "MCX4111A-XCAT", "MCX4121A-XCAT", 0 }; + + vlib_pci_device_t *pd = vlib_get_pci_device (&pci_addr); + u8 *pn = 0; + char **c; + int found = 0; + pn = format (0, "%U%c", + format_vlib_pci_vpd, pd->vpd_r, "PN", 0); + + if (!pn) + break; + + c = pn_100g; + while (!found && c[0]) + { + if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0) + { + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_100G; + break; + } + c++; + } + + c = pn_40g; + while (!found && c[0]) + { + if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0) + { + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; + break; + } + c++; + } + + c = pn_10g; + while (!found && c[0]) + { + if (strncmp ((char *) pn, c[0], strlen (c[0])) == 0) + { + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; + break; + } + c++; + } + + vec_free (pn); + } + + break; /* Intel Red Rock Canyon */ case VNET_DPDK_PMD_FM10K: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH; - xd->nb_rx_desc = DPDK_NB_RX_DESC_40GE; - xd->nb_tx_desc = DPDK_NB_TX_DESC_40GE; break; /* virtio */ diff --git a/vpp/Makefile.am b/vpp/Makefile.am index f90cb647..25d72749 100644 --- a/vpp/Makefile.am +++ b/vpp/Makefile.am @@ -121,6 +121,9 @@ vpp_LDFLAGS += -Wl,--no-whole-archive if WITH_DPDK_CRYPTO vpp_LDADD += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB endif +if WITH_DPDK_MLX5_PMD +vpp_LDFLAGS += -libverbs -lmlx5 -lnuma +endif endif vpp_LDADD += -lvppinfra -lm -lpthread -ldl diff --git a/vpp/configure.ac b/vpp/configure.ac index 2ade3d2a..8c333fd1 100644 --- a/vpp/configure.ac +++ b/vpp/configure.ac @@ -14,6 +14,11 @@ AC_ARG_WITH(dpdk_crypto, [with_dpdk_crypto=1], [with_dpdk_crypto=0]) +AC_ARG_WITH(dpdk_mlx5_pmd, + AC_HELP_STRING([--with-dpdk-mlx5-pmd],[Use Mellanox ConnetxX-4 DPDK PMD]), + [with_dpdk_mlx5_pmd=1], + [with_dpdk_mlx5_pmd=0]) + AC_ARG_ENABLE(dpdk-shared, AC_HELP_STRING([--enable-dpdk-shared],[Link with DPDK shared lib]), [enable_dpdk_shared=1], @@ -44,7 +49,7 @@ AM_CONDITIONAL(ENABLE_DPDK_SHARED, test "$enable_dpdk_shared" = "1") AC_SUBST(DPDK,["-DDPDK=${with_dpdk} -DDPDK_SHARED_LIB=${enable_dpdk_shared}"]) AM_CONDITIONAL(WITH_DPDK_CRYPTO, test "$with_dpdk_crypto" = "1") AC_SUBST(DPDK_CRYPTO,[-DDPDK_CRYPTO=${with_dpdk_crypto}]) - +AM_CONDITIONAL(WITH_DPDK_MLX5_PMD, test "$with_dpdk_mlx5_pmd" = "1") AM_COND_IF( [ENABLE_DPDK_SHARED], -- cgit 1.2.3-korg From 68fda1c47c43a9f23ecc7cdc49528a1faa73642f Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Sun, 1 Jan 2017 16:53:22 +0100 Subject: dpdk: do not build igb_uio module igb_uio is anyway build out of source from dkms package so there is no need to build it here. Also, this creates issues in cases where kernel headers are not available (i.e. building inside containers). 
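In practice the change amounts to one extra line in the generated DPDK custom config (a sketch of the relevant hunk; the set helper is the one used throughout dpdk/Makefile, and, as the message above notes, the igb_uio module is expected to come from the dkms package rather than from this build):

    # dpdk/Makefile, custom-config target -- do not build the in-tree igb_uio
    # kernel module; the dkms-packaged module is used instead, so the build no
    # longer needs kernel headers (e.g. when building inside containers)
    $(call set,RTE_EAL_IGB_UIO,n)
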
Change-Id: I270598a94dc67ad0b31e7f0db9ed6bd6fc8cfe30 Signed-off-by: Damjan Marion --- dpdk/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 178c30c6..82804f20 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -136,6 +136,7 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_TABLE,n) $(call set,RTE_LIBRTE_PIPELINE,n) $(call set,RTE_KNI_KMOD,n) + $(call set,RTE_EAL_IGB_UIO,n) @rm -f .config.ok $(CURDIR)/$(DPDK_TARBALL): -- cgit 1.2.3-korg From 2ce7f9834ab55728520bff0dd15f8d82c10b95a0 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 9 Jan 2017 20:24:50 +0100 Subject: Add dpdk development packaging Change-Id: I6aa2a6709241d99ce734c29e47487eb456907351 Signed-off-by: Damjan Marion --- Makefile | 25 ++++--- build-data/packages/dpdk.mk | 6 +- build-data/packages/vpp.mk | 3 +- build-data/platforms.mk | 4 -- build-root/Makefile | 1 - build-root/deb/debian/.gitignore | 3 - build-root/deb/debian/control | 17 +---- build-root/deb/debian/rules | 5 +- build-root/rpm/vpp.spec | 1 - build-root/scripts/find-dpdk-contents | 29 -------- dpdk/Makefile | 126 ++++++++++++++++++++++++++++++++-- dpdk/deb/debian/compat | 1 + dpdk/deb/debian/control | 18 +++++ dpdk/deb/debian/dkms/Makefile | 10 +++ dpdk/deb/debian/rules | 38 ++++++++++ dpdk/deb/debian/vpp-dpdk-dkms.dkms | 8 +++ dpdk/dkms/Makefile | 10 --- dpdk/dkms/create_deb_manifest.sh | 28 -------- dpdk/rpm/vpp-dpdk.spec | 26 +++++++ 19 files changed, 244 insertions(+), 115 deletions(-) delete mode 100755 build-root/scripts/find-dpdk-contents create mode 100755 dpdk/deb/debian/compat create mode 100644 dpdk/deb/debian/control create mode 100644 dpdk/deb/debian/dkms/Makefile create mode 100755 dpdk/deb/debian/rules create mode 100644 dpdk/deb/debian/vpp-dpdk-dkms.dkms delete mode 100644 dpdk/dkms/Makefile delete mode 100755 dpdk/dkms/create_deb_manifest.sh create mode 100644 dpdk/rpm/vpp-dpdk.spec (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index f1813a30..71eec082 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,12 @@ OS_ID = $(shell grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\" OS_VERSION_ID= $(shell grep '^VERSION_ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') endif +ifeq ($(OS_ID),ubuntu) +PKG=deb +else ifeq ($(OS_ID),centos) +PKG=rpm +endif + DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config @@ -84,6 +90,7 @@ help: @echo " run-vat - run vpp-api-test tool" @echo " pkg-deb - build DEB packages" @echo " pkg-rpm - build RPM packages" + @echo " dpdk-install-dev - install DPDK development packages" @echo " ctags - (re)generate ctags database" @echo " gtags - (re)generate gtags database" @echo " cscope - (re)generate cscope database" @@ -295,6 +302,9 @@ pkg-deb: pkg-rpm: dist $(call make,$(PLATFORM),install-rpm) +dpdk-install-dev: + make -C dpdk install-$(PKG) + ctags: ctags.files @ctags --totals --tag-relative -L $< @rm $< @@ -340,26 +350,19 @@ define banner @echo " " endef -verify: install-dep $(BR)/.bootstrap.ok +verify: install-dep $(BR)/.bootstrap.ok dpdk-install-dev $(call banner,"Building for PLATFORM=vpp using gcc") @make -C build-root PLATFORM=vpp TAG=vpp wipe-all install-packages $(call banner,"Building for PLATFORM=vpp_lite using gcc") @make -C build-root PLATFORM=vpp_lite TAG=vpp_lite wipe-all install-packages -ifeq ($(OS_ID),ubuntu) 
-ifeq ($(OS_VERSION_ID),16.04) +ifeq ($(OS_ID)-$(OS_VERSION_ID),ubuntu-16.04) $(call banner,"Installing dependencies") @sudo -E apt-get update @sudo -E apt-get $(CONFIRM) $(FORCE) install clang $(call banner,"Building for PLATFORM=vpp using clang") @make -C build-root CC=clang PLATFORM=vpp TAG=vpp_clang wipe-all install-packages endif - $(call banner,"Building deb packages") - @make pkg-deb -endif -ifeq ($(OS_ID),centos) - $(call banner,"Building rpm packages") - @make pkg-rpm -endif - @make test + $(call banner,"Building $(PKG) packages") + @make pkg-$(PKG) diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index 2dcf4093..6c136824 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -42,8 +42,8 @@ ifneq ($(DPDK_MAKE_EXTRA_ARGS),) DPDK_MAKE_ARGS += DPDK_MAKE_EXTRA_ARGS="$(DPDK_MAKE_EXTRA_ARGS)" endif -dpdk_configure = echo +dpdk_configure = echo -dpdk_make_args = $(DPDK_MAKE_ARGS) config +dpdk_make_args = $(DPDK_MAKE_ARGS) ebuild-build -dpdk_install = make $(DPDK_MAKE_ARGS) build +dpdk_install = make $(DPDK_MAKE_ARGS) ebuild-install diff --git a/build-data/packages/vpp.mk b/build-data/packages/vpp.mk index fe68cd82..81aeab69 100644 --- a/build-data/packages/vpp.mk +++ b/build-data/packages/vpp.mk @@ -19,8 +19,9 @@ vpp_CPPFLAGS += -I$($(PLATFORM)_dpdk_inc_dir) vpp_LDFLAGS += -L$($(PLATFORM)_dpdk_lib_dir) else vpp_configure_depend += dpdk-install -vpp_CPPFLAGS += $(call installed_includes_fn, dpdk) +vpp_CPPFLAGS += $(call installed_includes_fn, dpdk)/dpdk vpp_LDFLAGS += $(call installed_libs_fn, dpdk) +vpp_CPPFLAGS += -I/usr/include/dpdk endif ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) vpp_configure_args += --with-dpdk-crypto diff --git a/build-data/platforms.mk b/build-data/platforms.mk index 41836750..2351898e 100644 --- a/build-data/platforms.mk +++ b/build-data/platforms.mk @@ -63,10 +63,6 @@ install-deb: $(patsubst %,%-find-source,$(ROOT_PACKAGES)) ./scripts/find-vpp-api-python-contents $(INSTALL_PREFIX)$(ARCH) \ deb/debian/vpp-api-python.install ; \ \ - : dpdk headers ; \ - ./scripts/find-dpdk-contents $(INSTALL_PREFIX)$(ARCH) \ - deb/debian/vpp-dpdk-dev.install ; \ - \ : bin package needs startup config ; \ echo ../../src/vpp/conf/startup.conf /etc/vpp \ >> deb/debian/vpp.install ; \ diff --git a/build-root/Makefile b/build-root/Makefile index 6e26e90e..8b83990e 100644 --- a/build-root/Makefile +++ b/build-root/Makefile @@ -1167,5 +1167,4 @@ distclean: rm -rf $(MU_BUILD_ROOT_DIR)/python if [ -e /usr/bin/dh ];then (cd $(MU_BUILD_ROOT_DIR)/deb/;debian/rules clean); fi rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.install - rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/*.dkms rm -f $(MU_BUILD_ROOT_DIR)/deb/debian/changelog diff --git a/build-root/deb/debian/.gitignore b/build-root/deb/debian/.gitignore index c5e915a8..7b1028d6 100644 --- a/build-root/deb/debian/.gitignore +++ b/build-root/deb/debian/.gitignore @@ -3,12 +3,9 @@ files *debhelper* *.substvars *.install -vpp-dpdk-dkms* vpp/ vpp-dev/ vpp-lib/ -vpp-dpdk-dev/ -vpp-dpdk-dkms/ vpp-dbg/ vppctl/ vpp-api-lua/ diff --git a/build-root/deb/debian/control b/build-root/deb/debian/control index 6d26266a..e90cfca8 100644 --- a/build-root/deb/debian/control +++ b/build-root/deb/debian/control @@ -2,7 +2,7 @@ Source: vpp Section: net Priority: extra Maintainer: Cisco OpenVPP Packaging Team -Build-Depends: debhelper (>= 9), dkms, dh-systemd, dh-python, chrpath +Build-Depends: debhelper (>= 9), dh-systemd, dh-python, chrpath Standards-Version: 3.9.4 Package: vpp @@ -26,14 +26,6 @@ Description: Vector Packet 
Processing--development support This package contains development support files for the VPP libraries . -Package: vpp-dpdk-dev -Architecture: any -Depends: ${misc:Depends} -Description: Vector Packet Processing--development support - This package contains dpdk header files which match the dpdk version - compiled into the vpp executable - . - Package: vpp-lib Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} @@ -41,7 +33,6 @@ Description: Vector Packet Processing--runtime libraries This package contains the VPP shared libraries, including: . vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting. - dpdk - DPDK library svm - vm library vlib - vector processing library vlib-api - binary API library @@ -54,12 +45,6 @@ Description: Vector Packet Processing--runtime plugins This package contains VPP plugins . -Package: vpp-dpdk-dkms -Architecture: any -Depends: ${misc:Depends} -Description: DPDK 2.1 igb_uio_driver - This package contains Linux kernel modules distributed with DPDK. - Package: vpp-api-lua Architecture: any Depends: ${misc:Depends}, vpp (= ${source:Version}) diff --git a/build-root/deb/debian/rules b/build-root/deb/debian/rules index 4c84fc32..7046105c 100755 --- a/build-root/deb/debian/rules +++ b/build-root/deb/debian/rules @@ -18,7 +18,7 @@ include /usr/share/dpkg/default.mk # main packaging script based on dh7 syntax %: - dh $@ --with dkms --with systemd,python2 + dh $@ --with systemd,python2 override_dh_install: dh_install --exclude .git @@ -31,6 +31,3 @@ override_dh_shlibdeps: override_dh_strip: dh_strip --dbg-package=vpp-dbg - -override_dh_dkms: - dh_dkms -pvpp-dpdk-dkms diff --git a/build-root/rpm/vpp.spec b/build-root/rpm/vpp.spec index b3a337b1..9e3ad11b 100644 --- a/build-root/rpm/vpp.spec +++ b/build-root/rpm/vpp.spec @@ -43,7 +43,6 @@ Group: System Environment/Libraries %description lib This package contains the VPP shared libraries, including: vppinfra - foundation library supporting vectors, hashes, bitmaps, pools, and string formatting. -dpdk - DPDK library svm - vm library vlib - vector processing library vlib-api - binary API library diff --git a/build-root/scripts/find-dpdk-contents b/build-root/scripts/find-dpdk-contents deleted file mode 100755 index c7065139..00000000 --- a/build-root/scripts/find-dpdk-contents +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# includes -rm -rf dpdk-includes -mkdir dpdk-includes -(cd $1/dpdk/include; tar cfh - . | (cd ../../../dpdk-includes; tar xf -)) - -# If CDPATH is set, the "Change Directory" builtin (cd) will output the -# destination directory when a relative path is passed as an argument. -# In this case, this has the negative side effect of polluting the "paths" -# variable with the destination directory, breaking the package generation. -# -# Patient: Doctor! Doctor! It hurts when I do this... -# Doctor: Don't do that! -# -unset CDPATH -paths=`cd dpdk-includes; find . -type f -print` -rm -f $2 - -for path in $paths -do - dir=`dirname $path` - if [ $dir = "." 
] ; then - echo ../dpdk-includes/$path /usr/include/vpp-dpdk >> $2 - else - echo ../dpdk-includes/$path /usr/include/vpp-dpdk/$dir >> $2 - fi -done - diff --git a/dpdk/Makefile b/dpdk/Makefile index 82804f20..f6f90e0d 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -27,6 +27,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.11 +PKG_SUFFIX ?= vpp1 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -81,11 +82,11 @@ endif DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ T=$(DPDK_TARGET) \ RTE_CONFIG_TEMPLATE=../custom-config \ - RTE_OUTPUT=$(I) \ EXTRA_CFLAGS="$(DPDK_EXTRA_CFLAGS)" \ EXTRA_LDFLAGS="$(DPDK_EXTRA_LDFLAGS)" \ CPU_CFLAGS="$(DPDK_CPU_CFLAGS)" \ CPU_LDFLAGS="$(DPDK_CPU_LDFLAGS)" \ + DESTDIR=$(I) \ $(DPDK_MAKE_EXTRA_ARGS) DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") @@ -140,7 +141,6 @@ $(B)/custom-config: $(B)/.patch.ok Makefile @rm -f .config.ok $(CURDIR)/$(DPDK_TARBALL): - @mkdir -p $(B) @if [ -e $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) ] ; \ then cp $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) $(CURDIR) ; \ else curl -o $(CURDIR)/$(DPDK_TARBALL) -LO $(DPDK_TAR_URL) ; \ @@ -148,6 +148,7 @@ $(CURDIR)/$(DPDK_TARBALL): @rm -f $(B)/.download.ok $(B)/.download.ok: $(CURDIR)/$(DPDK_TARBALL) + @mkdir -p $(B) @openssl md5 $< | cut -f 2 -d " " - > $(B)/$(DPDK_TARBALL).md5sum @([ "$$(<$(B)/$(DPDK_TARBALL).md5sum)" = "$(DPDK_$(DPDK_VERSION)_TARBALL_MD5_CKSUM)" ] || \ ( echo "Bad Checksum! Please remove $< and retry" && \ @@ -168,7 +169,7 @@ extract: $(B)/.extract.ok $(B)/.patch.ok: $(B)/.extract.ok ifneq ($(wildcard $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch),) @echo --- patching --- - for f in $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch ; do \ + @for f in $(CURDIR)/dpdk-$(DPDK_VERSION)_patches/*.patch ; do \ echo Applying patch: $$(basename $$f) ; \ patch -p1 -d $(DPDK_SOURCE) < $$f ; \ done @@ -188,13 +189,130 @@ config: $(B)/.config.ok $(B)/.build.ok: $(DPDK_SOURCE_FILES) @if [ ! 
-e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi @make $(DPDK_MAKE_ARGS) install - @dkms/create_deb_manifest.sh $(DPDK_VERSION) $(subst $(realpath ..)/,,$(B)) @touch $@ .PHONY: build build: $(B)/.build.ok +.PHONY: install +install: $(B)/.build.ok + .PHONY: clean clean: @rm -rf $(B) $(I) +############################################################################## +# .deb packaging +############################################################################## + +DEB_ARCH=$(shell dpkg --print-architecture 2> /dev/null) +DEV_DEB=vpp-dpdk-dev_$(DPDK_VERSION)-$(PKG_SUFFIX)_$(DEB_ARCH).deb +INSTALLED_DEB_VER=$(shell dpkg-query --showformat='$${Version}' --show vpp-dpdk-dev 2> /dev/null) + +.PHONY: build-deb install-deb check-deb + +deb/debian/changelog: Makefile + @echo "vpp-dpdk ($(DPDK_VERSION)-$(PKG_SUFFIX)) unstable; urgency=low" > $@ + @echo "" >> $@ + @echo " * DPDK Release $(DPDK_VERSION)" >> $@ + @echo "" >> $@ + @echo " -- VPP Dev $(shell date -R)" >> $@ + +$(DEV_DEB): deb/debian/changelog + @cd deb && dpkg-buildpackage -b -uc -us + git clean -fdx deb + +build-deb: $(DEV_DEB) + +install-deb: +ifneq ($(INSTALLED_DEB_VER),$(DPDK_VERSION)-$(PKG_SUFFIX)) + @make $(DEV_DEB) + @sudo dpkg -i $(DEV_DEB) +else + @echo "==========================================================" + @echo " Up-to-date DPDK package already installed" + @echo "==========================================================" +endif + +check-deb: +ifneq ($(INSTALLED_DEB_VER),$(DPDK_VERSION)-$(PKG_SUFFIX)) + @echo "==========================================================" + @echo " Outdated DPDK package detected:" + @echo " Installed: vpp-dpdk-dev $(INSTALLED_DEB_VER)" + @echo " Current: vpp-dpdk-dev $(DPDK_VERSION)-$(PKG_SUFFIX)" + @echo "" + @echo " Please upgrade by invoking 'make dpdk-install-dev'" + @echo " from the top level directory." + @echo "==========================================================" +endif + +############################################################################## +# .rpm packaging +############################################################################## + +RPM_ARCH=$(shell rpm --eval "%{_arch}" 2> /dev/null) +DEV_RPM=vpp-dpdk-devel-$(DPDK_VERSION)-$(PKG_SUFFIX).$(RPM_ARCH).rpm +INSTALLED_RPM_VER=$(shell rpm -q --queryformat '%{VERSION}-%{RELEASE}' vpp-dpdk-devel | grep -v "not inst") + +.PHONY: build-rpm install-rpm check-rpm + +$(DEV_RPM): Makefile rpm/vpp-dpdk.spec + @rpmbuild -bb \ + --define "_topdir $(CURDIR)/rpm" \ + --define "_version $(DPDK_VERSION)" \ + --define "_release $(PKG_SUFFIX)" \ + $(CURDIR)/rpm/vpp-dpdk.spec + mv rpm/RPMS/$(RPM_ARCH)/*.rpm . + git clean -fdx rpm + +build-rpm: $(DEV_RPM) + +install-rpm: +ifneq ($(INSTALLED_RPM_VER),$(DPDK_VERSION)-$(PKG_SUFFIX)) + @make $(DEV_RPM) + sudo rpm -Uih $(DEV_RPM) +else + @echo "==========================================================" + @echo " Up-to-date DPDK package already installed" + @echo "==========================================================" +endif + +check-rpm: +ifneq ($(INSTALLED_RPM_VER),$(DPDK_VERSION)-$(PKG_SUFFIX)) + @echo "==========================================================" + @echo " Outdated DPDK package detected:" + @echo " Installed: vpp-dpdk-devel $(INSTALLED_RPM_VER)" + @echo " Current: vpp-dpdk-devel $(DPDK_VERSION)-$(PKG_SUFFIX)" + @echo "" + @echo " Please upgrade by invoking 'make dpdk-install-dev'" + @echo " from the top level directory." 
+ @echo "==========================================================" +endif + +############################################################################## +# ebuild support +############################################################################## + +.PHONY: ebuild-build ebuild-install + +ebuild-build: +ifeq ($(INSTALLED_DEB_VER)$(INSTALLED_RPM_VER),) + @echo "==========================================================" + @echo "Building DPDK from source. Consider installing development" + @echo "package by invoking 'make dpdk-install-dev' from the" + @echo "top level directory" + @echo "==========================================================" + make config +else +ifneq ($(INSTALLED_DEB_VER),) + make check-deb +endif +ifneq ($(INSTALLED_RPM_VER),) + make check-rpm +endif +endif + +ebuild-install: +ifeq ($(INSTALLED_DEB_VER)$(INSTALLED_RPM_VER),) + make install +endif diff --git a/dpdk/deb/debian/compat b/dpdk/deb/debian/compat new file mode 100755 index 00000000..ec635144 --- /dev/null +++ b/dpdk/deb/debian/compat @@ -0,0 +1 @@ +9 diff --git a/dpdk/deb/debian/control b/dpdk/deb/debian/control new file mode 100644 index 00000000..9ffa1a0a --- /dev/null +++ b/dpdk/deb/debian/control @@ -0,0 +1,18 @@ +Source: vpp-dpdk +Section: net +Priority: extra +Maintainer: vpp-dev@lists.fd.io +Build-Depends: debhelper (>= 9), dkms +Standards-Version: 3.9.4 + +Package: vpp-dpdk-dev +Architecture: any +Depends: ${shlibs:Depends} +Description: DPDK Development Package for VPP +Conflicts: dpdk, dpdk-dev, libdpdk-dev + +Package: vpp-dpdk-dkms +Architecture: any +Depends: ${misc:Depends} +Description: DPDK Development Package for VPP - Kernel Modules +Conflicts: dpdk-igb-uio-dkms diff --git a/dpdk/deb/debian/dkms/Makefile b/dpdk/deb/debian/dkms/Makefile new file mode 100644 index 00000000..452c7c26 --- /dev/null +++ b/dpdk/deb/debian/dkms/Makefile @@ -0,0 +1,10 @@ +obj-m:=igb_uio.o + +CONFIG_MODULE_SIG=n + +EXTRA_CFLAGS += -Winline -I$(PWD) + +default: + $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) modules +clean: + $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) clean diff --git a/dpdk/deb/debian/rules b/dpdk/deb/debian/rules new file mode 100755 index 00000000..98b1048b --- /dev/null +++ b/dpdk/deb/debian/rules @@ -0,0 +1,38 @@ +#!/usr/bin/make -f +DH_VERBOSE = 1 +PKG=vpp-dpdk + +VERSION = $(shell dpkg-parsechangelog | sed -nr '/^Version:/s/Version: //p') +BASE_VER = $(word 1, $(subst -, ,$(VERSION))) + +export DPDK_BUILD_DIR=$(CURDIR)/_build +export DPDK_INSTALL_DIR=$(CURDIR)/debian/tmp/usr + +SRC=_build/dpdk-$(BASE_VER) + +MAKE_ARGS=-C .. 
+ +include /usr/share/dpkg/default.mk + +%: + dh $@ --with dkms + +override_dh_clean: + make $(MAKE_ARGS) clean + +override_dh_auto_configure: + make $(MAKE_ARGS) config + +override_dh_install: + make $(MAKE_ARGS) install + dh_install -p$(PKG)-dkms \ + $(SRC)/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h \ + $(SRC)/lib/librte_eal/common/include/rte_pci_dev_features.h \ + $(SRC)/lib/librte_eal/linuxapp/igb_uio/igb_uio.c \ + $(SRC)/lib/librte_eal/linuxapp/igb_uio/compat.h \ + debian/dkms/Makefile \ + /usr/src/$(PKG)-dkms-$(VERSION) + dh_install -p$(PKG)-dev --autodest /usr + +override_dh_dkms: + dh_dkms -p$(PKG)-dkms -V $(VERSION) diff --git a/dpdk/deb/debian/vpp-dpdk-dkms.dkms b/dpdk/deb/debian/vpp-dpdk-dkms.dkms new file mode 100644 index 00000000..a166c313 --- /dev/null +++ b/dpdk/deb/debian/vpp-dpdk-dkms.dkms @@ -0,0 +1,8 @@ +PACKAGE_VERSION=#MODULE_VERSION# +PACKAGE_NAME="vpp-dpdk-dkms" +CLEAN="make clean" +BUILT_MODULE_NAME[0]="igb_uio" +BUILT_MODULE_LOCATION[0]="./" +DEST_MODULE_LOCATION[0]="/kernel/net" +MAKE[1]="make" +AUTOINSTALL="yes" diff --git a/dpdk/dkms/Makefile b/dpdk/dkms/Makefile deleted file mode 100644 index 452c7c26..00000000 --- a/dpdk/dkms/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -obj-m:=igb_uio.o - -CONFIG_MODULE_SIG=n - -EXTRA_CFLAGS += -Winline -I$(PWD) - -default: - $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) modules -clean: - $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) clean diff --git a/dpdk/dkms/create_deb_manifest.sh b/dpdk/dkms/create_deb_manifest.sh deleted file mode 100755 index f8305588..00000000 --- a/dpdk/dkms/create_deb_manifest.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -VER=$1 -DPDK_ROOT=../../$2/dpdk-${VER} -DEBIAN_DIR=../build-root/deb/debian -SRC_DIR=/usr/src/vpp-dpdk-dkms-${VER}/ - - -cat > ${DEBIAN_DIR}/vpp-dpdk-dkms.install << _EOF_ -${DPDK_ROOT}/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h ${SRC_DIR} -${DPDK_ROOT}/lib/librte_eal/common/include/rte_pci_dev_features.h ${SRC_DIR} -${DPDK_ROOT}/lib/librte_eal/linuxapp/igb_uio/igb_uio.c ${SRC_DIR} -${DPDK_ROOT}/lib/librte_eal/linuxapp/igb_uio/compat.h ${SRC_DIR} -../../dpdk/dkms/Makefile ${SRC_DIR} -_EOF_ - - -# dkms config -cat > ${DEBIAN_DIR}/vpp-dpdk-dkms.dkms << _EOF_ -PACKAGE_VERSION="${VER}" -PACKAGE_NAME="vpp-dpdk-dkms" -CLEAN="make clean" -BUILT_MODULE_NAME[0]="igb_uio" -BUILT_MODULE_LOCATION[0]="./" -DEST_MODULE_LOCATION[0]="/kernel/net" -MAKE[1]="make" -AUTOINSTALL="yes" -_EOF_ diff --git a/dpdk/rpm/vpp-dpdk.spec b/dpdk/rpm/vpp-dpdk.spec new file mode 100644 index 00000000..cc617681 --- /dev/null +++ b/dpdk/rpm/vpp-dpdk.spec @@ -0,0 +1,26 @@ +%define _make_args -C ../.. 
DPDK_BUILD_DIR=%{_topdir}/tmp DPDK_INSTALL_DIR=%{buildroot}/usr + +Name: vpp-dpdk +Version: %{_version} +Release: %{_release} +Summary: DPDK development packages for VPP +License: BSD + +%description + +%package devel +Summary: DPDK development package for VPP +Group: Development/Libraries + +%description devel + +%install +make %{_make_args} config +make %{_make_args} install + +%files devel +/usr/bin/* +/usr/include/dpdk +/usr/lib/* +/usr/sbin/* +/usr/share/dpdk -- cgit 1.2.3-korg From 248968be0fd2ef4d19b6c6ce1b019670ee8b1025 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Fri, 20 Jan 2017 23:59:28 +0100 Subject: Fix cosmetic issue in dpdk/Makefile Change-Id: I95684396e3dad53ddf7479467a36a5b68e5703cf Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index f6f90e0d..586d2425 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -252,7 +252,7 @@ endif RPM_ARCH=$(shell rpm --eval "%{_arch}" 2> /dev/null) DEV_RPM=vpp-dpdk-devel-$(DPDK_VERSION)-$(PKG_SUFFIX).$(RPM_ARCH).rpm -INSTALLED_RPM_VER=$(shell rpm -q --queryformat '%{VERSION}-%{RELEASE}' vpp-dpdk-devel | grep -v "not inst") +INSTALLED_RPM_VER=$(shell rpm -q --queryformat '%{VERSION}-%{RELEASE}' vpp-dpdk-devel 2> /dev/null | grep -v "not inst") .PHONY: build-rpm install-rpm check-rpm -- cgit 1.2.3-korg From d04b60bfa940e21ab4676a1cb3c15989748be40a Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Fri, 20 Jan 2017 15:35:23 +0000 Subject: dpdk: rework cryptodev ipsec build and setup Build Cryptodev IPsec support by default when DPDK is enabled but only build hardware Cryptodev PMDs. To enable Cryptodev support, a new startup.conf option for dpdk has been introduced 'enable-cryptodev'. During VPP init, if Cryptodev support is not enabled or not enough cryptodev resources are available then default to OpenSSL ipsec implementation. 
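In code terms, the fallback described above reduces to two early-exit checks at the top of the new dpdk-ipsec-process node (see the src/vnet/devices/dpdk/ipsec/ipsec.c hunk further down). A condensed sketch of that decision, with names taken from the hunk and the cryptodev/queue setup and graph rewiring elided -- not the literal patch code:

static uword
dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                    vlib_frame_t * f)
{
  dpdk_config_main_t *conf = &dpdk_config_main;

  /* startup.conf did not contain 'enable-cryptodev': keep OpenSSL ESP nodes */
  if (!conf->cryptodev)
    {
      clib_warning ("DPDK Cryptodev support is disabled, "
                    "default to OpenSSL IPsec");
      return 0;
    }

  /* not enough queue pairs for every worker: clear the flag and fall back */
  if (check_cryptodev_queues () < 0)
    {
      conf->cryptodev = 0;
      clib_warning ("not enough Cryptodevs, default to OpenSSL IPsec");
      return 0;
    }

  /* ... otherwise configure the cryptodevs, register the check_support and
     add_del_sa_sess callbacks, and point esp-encrypt / esp-decrypt traffic
     at the dpdk-esp-* nodes ... */
  return 0;
}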
Change-Id: I5aa7e0d5c2676bdb41d775ef40364536a081956d Signed-off-by: Sergio Gonzalez Monroy --- build-data/packages/dpdk.mk | 6 +- build-data/packages/vpp.mk | 4 +- build-data/platforms/vpp.mk | 2 +- dpdk/Makefile | 10 +- src/Makefile.am | 2 +- src/configure.ac | 4 +- src/vat/api_format.c | 31 --- src/vnet.am | 4 +- src/vnet/devices/dpdk/dpdk.h | 1 + src/vnet/devices/dpdk/format.c | 3 + src/vnet/devices/dpdk/init.c | 3 + src/vnet/devices/dpdk/ipsec/cli.c | 8 + src/vnet/devices/dpdk/ipsec/crypto_node.c | 25 +- .../devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md | 37 ++- src/vnet/devices/dpdk/ipsec/esp.h | 95 ++------ src/vnet/devices/dpdk/ipsec/esp_decrypt.c | 11 +- src/vnet/devices/dpdk/ipsec/esp_encrypt.c | 11 +- src/vnet/devices/dpdk/ipsec/ipsec.c | 257 +++++++++++++++------ src/vnet/devices/dpdk/ipsec/ipsec.h | 2 +- src/vnet/ipsec-gre/interface.c | 8 +- src/vnet/ipsec/ipsec.c | 51 ++-- src/vnet/ipsec/ipsec.h | 63 +++-- src/vnet/ipsec/ipsec_api.c | 43 +--- src/vnet/ipsec/ipsec_cli.c | 31 +-- src/vnet/ipsec/ipsec_if.c | 73 ++++-- src/vnet/ipsec/ipsec_if_in.c | 24 +- src/vnet/ipsec/ipsec_if_out.c | 22 +- src/vnet/ipsec/ipsec_input.c | 24 +- src/vnet/ipsec/ipsec_output.c | 20 -- 29 files changed, 452 insertions(+), 423 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index 6c136824..6938392c 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -22,9 +22,9 @@ DPDK_MAKE_ARGS = -C $(call find_source_fn,$(PACKAGE_SOURCE)) \ DPDK_TUNE=$(DPDK_TUNE) \ DPDK_DEBUG=$(DPDK_DEBUG) -DPDK_CRYPTO_PMD=$(strip $($(PLATFORM)_uses_dpdk_cryptodev)) -ifneq ($(DPDK_CRYPTO_PMD),) -DPDK_MAKE_ARGS += DPDK_CRYPTO_PMD=y +DPDK_CRYPTO_SW_PMD=$(strip $($(PLATFORM)_uses_dpdk_cryptodev_sw)) +ifneq ($(DPDK_CRYPTO_SW_PMD),) +DPDK_MAKE_ARGS += DPDK_CRYPTO_SW_PMD=y endif DPDK_MLX5_PMD=$(strip $($(PLATFORM)_uses_dpdk_mlx5_pmd)) diff --git a/build-data/packages/vpp.mk b/build-data/packages/vpp.mk index 81aeab69..64eb0d89 100644 --- a/build-data/packages/vpp.mk +++ b/build-data/packages/vpp.mk @@ -23,8 +23,8 @@ vpp_CPPFLAGS += $(call installed_includes_fn, dpdk)/dpdk vpp_LDFLAGS += $(call installed_libs_fn, dpdk) vpp_CPPFLAGS += -I/usr/include/dpdk endif -ifeq ($($(PLATFORM)_uses_dpdk_cryptodev),yes) -vpp_configure_args += --with-dpdk-crypto +ifeq ($($(PLATFORM)_uses_dpdk_cryptodev_sw),yes) +vpp_configure_args += --with-dpdk-crypto-sw endif ifeq ($($(PLATFORM)_uses_dpdk_mlx5_pmd),yes) vpp_configure_args += --with-dpdk-mlx5-pmd diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index dd6f9dc2..5b200587 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -44,7 +44,7 @@ vpp_configure_args_vpp = --with-dpdk vlib_configure_args_vpp = --with-pre-data=128 # DPDK configuration parameters -# vpp_uses_dpdk_cryptodev = yes +# vpp_uses_dpdk_cryptodev_sw = yes # vpp_uses_dpdk_mlx5_pmd = yes # vpp_uses_external_dpdk = yes # vpp_dpdk_inc_dir = /usr/include/dpdk diff --git a/dpdk/Makefile b/dpdk/Makefile index 586d2425..22e97878 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -21,13 +21,13 @@ DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_MARCH ?= native DPDK_TUNE ?= generic DPDK_DEBUG ?= n -DPDK_CRYPTO_PMD ?= n +DPDK_CRYPTO_SW_PMD ?= n DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.11 -PKG_SUFFIX ?= vpp1 +PKG_SUFFIX ?= vpp2 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -121,9 +121,9 @@ 
$(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_VMXNET3_DEBUG_INIT,$(DPDK_DEBUG)) $(call set,RTE_LIBRTE_PMD_BOND,y) $(call set,RTE_LIBRTE_IP_FRAG,y) - $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_PMD)) - $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_PMD)) - $(call set,RTE_LIBRTE_PMD_QAT,$(DPDK_CRYPTO_PMD)) + $(call set,RTE_LIBRTE_PMD_QAT,y) + $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_SW_PMD)) + $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_SW_PMD)) $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed $(call set,RTE_LIBRTE_TIMER,n) diff --git a/src/Makefile.am b/src/Makefile.am index 5e248972..239afeac 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -47,7 +47,7 @@ DPDK_LD_FLAGS = -Wl,--whole-archive,-ldpdk,--no-whole-archive else DPDK_LD_FLAGS = -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive,-lm,-ldl endif -if WITH_DPDK_CRYPTO +if WITH_DPDK_CRYPTO_SW DPDK_LD_ADD = -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB endif if WITH_DPDK_MLX5_PMD diff --git a/src/configure.ac b/src/configure.ac index fbedabf0..49da6248 100644 --- a/src/configure.ac +++ b/src/configure.ac @@ -98,7 +98,7 @@ DISABLE_ARG(japi, [Disable Java API bindings]) # --with-X WITH_ARG(dpdk, [Use use DPDK]) -WITH_ARG(dpdk_crypto, [Use DPDK cryptodev]) +WITH_ARG(dpdk_crypto_sw,[Use DPDK cryptodev SW PMDs]) WITH_ARG(dpdk_mlx5_pmd, [Use DPDK with mlx5 PMD]) # --without-X @@ -132,7 +132,7 @@ AC_SUBST(APICLI, [-DVPP_API_TEST_BUILTIN=${n_with_apicli}]) AC_DEFINE_UNQUOTED(DPDK, [${n_with_dpdk}]) AC_DEFINE_UNQUOTED(DPDK_SHARED_LIB, [${n_enable_dpdk_shared}]) -AC_DEFINE_UNQUOTED(DPDK_CRYPTO, [${n_with_dpdk_crypto}]) +AC_DEFINE_UNQUOTED(DPDK_CRYPTO_SW, [${n_with_dpdk_crypto_sw}]) AC_DEFINE_UNQUOTED(WITH_LIBSSL, [${n_with_libssl}]) diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 4cfe4a58..6b8c5fb9 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -12199,11 +12199,7 @@ api_ipsec_sad_add_del_entry (vat_main_t * vam) if (unformat (i, "integ_alg %U", unformat_ipsec_integ_alg, &integ_alg)) { -#if DPDK_CRYPTO==1 - if (integ_alg < IPSEC_INTEG_ALG_NONE || -#else if (integ_alg < IPSEC_INTEG_ALG_SHA1_96 || -#endif integ_alg >= IPSEC_INTEG_N_ALG) { clib_warning ("unsupported integ-alg: '%U'", @@ -12221,33 +12217,6 @@ api_ipsec_sad_add_del_entry (vat_main_t * vam) } -#if DPDK_CRYPTO==1 - /*Special cases, aes-gcm-128 encryption */ - if (crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) - { - if (integ_alg != IPSEC_INTEG_ALG_NONE - && integ_alg != IPSEC_INTEG_ALG_AES_GCM_128) - { - clib_warning - ("unsupported: aes-gcm-128 crypto-alg needs none as integ-alg"); - return -99; - } - else /*set integ-alg internally to aes-gcm-128 */ - integ_alg = IPSEC_INTEG_ALG_AES_GCM_128; - } - else if (integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) - { - clib_warning ("unsupported integ-alg: aes-gcm-128"); - return -99; - } - else if (integ_alg == IPSEC_INTEG_ALG_NONE) - { - clib_warning ("unsupported integ-alg: none"); - return -99; - } -#endif - - M (IPSEC_SAD_ADD_DEL_ENTRY, ipsec_sad_add_del_entry); mp->sad_id = ntohl (sad_id); diff --git a/src/vnet.am b/src/vnet.am index 28a1b19a..96cfa557 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -400,7 +400,7 @@ libvnet_la_SOURCES += \ API_FILES += vnet/ipsec/ipsec.api -if WITH_DPDK_CRYPTO +if WITH_DPDK libvnet_la_SOURCES += \ vnet/devices/dpdk/ipsec/esp_encrypt.c \ vnet/devices/dpdk/ipsec/esp_decrypt.c \ @@ -419,7 +419,7 @@ nobase_include_HEADERS += \ vnet/ipsec/ikev2.h \ vnet/ipsec/ikev2_priv.h \ vnet/ipsec/ipsec.api.h -if WITH_DPDK_CRYPTO +if WITH_DPDK 
nobase_include_HEADERS += \ vnet/devices/dpdk/ipsec/ipsec.h \ vnet/devices/dpdk/ipsec/esp.h diff --git a/src/vnet/devices/dpdk/dpdk.h b/src/vnet/devices/dpdk/dpdk.h index a91e87df..1b54460e 100644 --- a/src/vnet/devices/dpdk/dpdk.h +++ b/src/vnet/devices/dpdk/dpdk.h @@ -338,6 +338,7 @@ typedef struct u8 *uio_driver_name; u8 no_multi_seg; u8 enable_tcp_udp_checksum; + u8 cryptodev; /* Required config parameters */ u8 coremask_set_manually; diff --git a/src/vnet/devices/dpdk/format.c b/src/vnet/devices/dpdk/format.c index ff7c7a5a..cc0d71af 100644 --- a/src/vnet/devices/dpdk/format.c +++ b/src/vnet/devices/dpdk/format.c @@ -684,6 +684,8 @@ format_dpdk_rte_mbuf (u8 * s, va_list * va) return s; } +/* FIXME is this function used? */ +#if 0 uword unformat_socket_mem (unformat_input_t * input, va_list * va) { @@ -710,6 +712,7 @@ unformat_socket_mem (unformat_input_t * input, va_list * va) done: return 1; } +#endif clib_error_t * unformat_rss_fn (unformat_input_t * input, uword * rss_fn) diff --git a/src/vnet/devices/dpdk/init.c b/src/vnet/devices/dpdk/init.c index 3fa656ea..01ef48cb 100755 --- a/src/vnet/devices/dpdk/init.c +++ b/src/vnet/devices/dpdk/init.c @@ -1054,6 +1054,9 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) else if (unformat (input, "no-multi-seg")) conf->no_multi_seg = 1; + else if (unformat (input, "enable-cryptodev")) + conf->cryptodev = 1; + else if (unformat (input, "dev default %U", unformat_vlib_cli_sub_input, &sub_input)) { diff --git a/src/vnet/devices/dpdk/ipsec/cli.c b/src/vnet/devices/dpdk/ipsec/cli.c index 3b634e03..93df4a64 100644 --- a/src/vnet/devices/dpdk/ipsec/cli.c +++ b/src/vnet/devices/dpdk/ipsec/cli.c @@ -14,15 +14,23 @@ */ #include +#include #include static void dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) { + dpdk_config_main_t *conf = &dpdk_config_main; dpdk_crypto_main_t *dcm = &dpdk_crypto_main; vlib_thread_main_t *tm = vlib_get_thread_main (); u32 i, skip_master; + if (!conf->cryptodev) + { + vlib_cli_output (vm, "DPDK Cryptodev support is disabled\n"); + return; + } + if (detail_display) vlib_cli_output (vm, "worker\t%10s\t%15s\tdir\tdev\tqp\n", "cipher", "auth"); diff --git a/src/vnet/devices/dpdk/ipsec/crypto_node.c b/src/vnet/devices/dpdk/ipsec/crypto_node.c index 7b32704e..e8fef235 100644 --- a/src/vnet/devices/dpdk/ipsec/crypto_node.c +++ b/src/vnet/devices/dpdk/ipsec/crypto_node.c @@ -22,6 +22,8 @@ #include #include +#include +#include #include #define foreach_dpdk_crypto_input_next \ @@ -183,24 +185,27 @@ dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, return n_deq; } +/* *INDENT-OFF* */ VLIB_REGISTER_NODE (dpdk_crypto_input_node) = { - .function = dpdk_crypto_input_fn,.name = "dpdk-crypto-input",.format_trace = - format_dpdk_crypto_input_trace,.type = VLIB_NODE_TYPE_INPUT,.state = - VLIB_NODE_STATE_DISABLED,.n_errors = - DPDK_CRYPTO_INPUT_N_ERROR,.error_strings = - dpdk_crypto_input_error_strings,.n_next_nodes = - DPDK_CRYPTO_INPUT_N_NEXT,.next_nodes = + .function = dpdk_crypto_input_fn, + .name = "dpdk-crypto-input", + .format_trace = format_dpdk_crypto_input_trace, + .type = VLIB_NODE_TYPE_INPUT, + .state = VLIB_NODE_STATE_DISABLED, + .n_errors = DPDK_CRYPTO_INPUT_N_ERROR, + .error_strings = dpdk_crypto_input_error_strings, + .n_next_nodes = DPDK_CRYPTO_INPUT_N_NEXT, + .next_nodes = { #define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n, foreach_dpdk_crypto_input_next #undef _ - } -,}; + }, +}; +/* *INDENT-ON* */ -#if DPDK_CRYPTO==1 VLIB_NODE_FUNCTION_MULTIARCH (dpdk_crypto_input_node, 
dpdk_crypto_input_fn) -#endif /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md b/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md index 8089696f..fed2fe0e 100644 --- a/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md +++ b/src/vnet/devices/dpdk/ipsec/dpdk_crypto_ipsec_doc.md @@ -7,43 +7,55 @@ This document is meant to contain all related information about implementation a DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)). -When DPDK Cryptodev support is enabled, the node graph is modified by adding and replacing some of the nodes. - -The following nodes are replaced: -* esp-encrypt -> dpdk-esp-encrypt -* esp-decrypt -> dpdk-esp-decrypt +When DPDK support is enabled and there are enough Cryptodev resources for all workers, the node graph is reconfigured by adding and changing default next nodes. The following nodes are added: * dpdk-crypto-input : polling input node, basically dequeuing from crypto devices. +* dpdk-esp-encrypt : internal node. +* dpdk-esp-decrypt : internal node. * dpdk-esp-encrypt-post : internal node. * dpdk-esp-decrypt-post : internal node. +Set new default next nodes: +* for esp encryption: esp-encrypt -> dpdk-esp-encrypt +* for esp decryption: esp-decrypt -> dpdk-esp-decrypt + ### How to enable VPP IPSec with DPDK Cryptodev support -To enable DPDK Cryptodev support (disabled by default), we need the following env option: +DPDK Cryptodev is supported in DPDK enabled VPP. +By default, only HW Cryptodev is supported but needs to be explicetly enabled with the following config option: + +``` +dpdk { + enable-cryptodev +} +``` + +To enable SW Cryptodev support (AESNI-MB-PMD and GCM-PMD), we need the following env option: - vpp_uses_dpdk_cryptodev=yes + vpp_uses_dpdk_cryptodev_sw=yes A couple of ways to achive this: * uncomment/add it in the platforms config (ie. build-data/platforms/vpp.mk) -* set the option when building vpp (ie. make vpp_uses_dpdk_cryptodev=yes build-release) +* set the option when building vpp (ie. make vpp_uses_dpdk_cryptodev_sw=yes build-release) + +When enabling SW Cryptodev support, it means that you need to pre-build the required crypto libraries needed by those SW Cryptodev PMDs. ### Crypto Resources allocation VPP allocates crypto resources based on a best effort approach: * first allocate Hardware crypto resources, then Software. -* if there are not enough crypto resources for all workers, all packets will be dropped if they reach ESP encrypt/decrypt nodes, displaying the warning: +* if there are not enough crypto resources for all workers, the graph node is not modifed, therefore the default VPP IPsec implementation based in OpenSSL is used. The following message is displayed: 0: dpdk_ipsec_init: not enough cryptodevs for ipsec ### Configuration example -No especial IPsec configuration is required. - -Once DPDK Cryptodev is enabled, the user just needs to provide cryptodevs in the startup.conf. +To enable DPDK Cryptodev the user just need to provide the startup.conf option +as mentioned previously. 
Example startup.conf: @@ -53,6 +65,7 @@ dpdk { num-mbufs 131072 dev 0000:81:00.0 dev 0000:81:00.1 + enable-cryptodev dev 0000:85:01.0 dev 0000:85:01.1 vdev cryptodev_aesni_mb_pmd,socket_id=1 diff --git a/src/vnet/devices/dpdk/ipsec/esp.h b/src/vnet/devices/dpdk/ipsec/esp.h index 7ef90c49..d414d679 100644 --- a/src/vnet/devices/dpdk/ipsec/esp.h +++ b/src/vnet/devices/dpdk/ipsec/esp.h @@ -97,60 +97,11 @@ dpdk_esp_init () } static_always_inline int -add_del_sa_sess (u32 sa_index, u8 is_add) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm; - u8 skip_master = vlib_num_workers () > 0; - - /* *INDENT-OFF* */ - vec_foreach (cwm, dcm->workers_main) - { - crypto_sa_session_t *sa_sess; - u8 is_outbound; - - if (skip_master) - { - skip_master = 0; - continue; - } - - for (is_outbound = 0; is_outbound < 2; is_outbound++) - { - if (is_add) - { - pool_get (cwm->sa_sess_d[is_outbound], sa_sess); - } - else - { - u8 dev_id; - - sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index); - dev_id = cwm->qp_data[sa_sess->qp_index].dev_id; - - if (!sa_sess->sess) - continue; - - if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess)) - { - clib_warning("failed to free session"); - return -1; - } - memset(sa_sess, 0, sizeof(sa_sess[0])); - } - } - } - /* *INDENT-OFF* */ - - return 0; -} - -static_always_inline int -translate_crypto_algo(ipsec_crypto_alg_t crypto_algo, - struct rte_crypto_sym_xform *cipher_xform) +translate_crypto_algo (ipsec_crypto_alg_t crypto_algo, + struct rte_crypto_sym_xform *cipher_xform) { switch (crypto_algo) - { + { case IPSEC_CRYPTO_ALG_NONE: cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL; break; @@ -164,7 +115,7 @@ translate_crypto_algo(ipsec_crypto_alg_t crypto_algo, break; default: return -1; - } + } cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; @@ -172,10 +123,11 @@ translate_crypto_algo(ipsec_crypto_alg_t crypto_algo, } static_always_inline int -translate_integ_algo(ipsec_integ_alg_t integ_alg, - struct rte_crypto_sym_xform *auth_xform, int use_esn) +translate_integ_algo (ipsec_integ_alg_t integ_alg, + struct rte_crypto_sym_xform *auth_xform, int use_esn) { - switch (integ_alg) { + switch (integ_alg) + { case IPSEC_INTEG_ALG_NONE: auth_xform->auth.algo = RTE_CRYPTO_AUTH_NULL; auth_xform->auth.digest_length = 0; @@ -203,11 +155,11 @@ translate_integ_algo(ipsec_integ_alg_t integ_alg, case IPSEC_INTEG_ALG_AES_GCM_128: auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM; auth_xform->auth.digest_length = 16; - auth_xform->auth.add_auth_data_length = use_esn? 12 : 8; + auth_xform->auth.add_auth_data_length = use_esn ? 
12 : 8; break; default: return -1; - } + } auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; @@ -215,25 +167,26 @@ translate_integ_algo(ipsec_integ_alg_t integ_alg, } static_always_inline int -create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound) +create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, + u8 is_outbound) { - u32 cpu_index = os_get_cpu_number(); - dpdk_crypto_main_t * dcm = &dpdk_crypto_main; + u32 cpu_index = os_get_cpu_number (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; - struct rte_crypto_sym_xform cipher_xform = {0}; - struct rte_crypto_sym_xform auth_xform = {0}; + struct rte_crypto_sym_xform cipher_xform = { 0 }; + struct rte_crypto_sym_xform auth_xform = { 0 }; struct rte_crypto_sym_xform *xfs; uword key = 0, *data; - crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *)&key; + crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) { sa->crypto_key_len -= 4; - clib_memcpy(&sa->salt, &sa->crypto_key[sa->crypto_key_len], 4); + clib_memcpy (&sa->salt, &sa->crypto_key[sa->crypto_key_len], 4); } else { - sa->salt = (u32) rand(); + sa->salt = (u32) rand (); } cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; @@ -244,11 +197,11 @@ create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound) auth_xform.auth.key.data = sa->integ_key; auth_xform.auth.key.length = sa->integ_key_len; - if (translate_crypto_algo(sa->crypto_alg, &cipher_xform) < 0) + if (translate_crypto_algo (sa->crypto_alg, &cipher_xform) < 0) return -1; p_key->cipher_algo = cipher_xform.cipher.algo; - if (translate_integ_algo(sa->integ_alg, &auth_xform, sa->use_esn) < 0) + if (translate_integ_algo (sa->integ_alg, &auth_xform, sa->use_esn) < 0) return -1; p_key->auth_algo = auth_xform.auth.algo; @@ -269,17 +222,17 @@ create_sym_sess(ipsec_sa_t *sa, crypto_sa_session_t *sa_sess, u8 is_outbound) p_key->is_outbound = is_outbound; - data = hash_get(cwm->algo_qp_map, key); + data = hash_get (cwm->algo_qp_map, key); if (!data) return -1; sa_sess->sess = - rte_cryptodev_sym_session_create(cwm->qp_data[*data].dev_id, xfs); + rte_cryptodev_sym_session_create (cwm->qp_data[*data].dev_id, xfs); if (!sa_sess->sess) return -1; - sa_sess->qp_index = (u8)*data; + sa_sess->qp_index = (u8) * data; return 0; } diff --git a/src/vnet/devices/dpdk/ipsec/esp_decrypt.c b/src/vnet/devices/dpdk/ipsec/esp_decrypt.c index 89ab9f9b..53b2d122 100644 --- a/src/vnet/devices/dpdk/ipsec/esp_decrypt.c +++ b/src/vnet/devices/dpdk/ipsec/esp_decrypt.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #define foreach_esp_decrypt_next \ _(DROP, "error-drop") \ @@ -189,7 +191,14 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE(!sa_sess->sess)) { int ret = create_sym_sess(sa0, sa_sess, 0); - ASSERT(ret == 0); + + if (PREDICT_FALSE (ret)) + { + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } } sess = sa_sess->sess; diff --git a/src/vnet/devices/dpdk/ipsec/esp_encrypt.c b/src/vnet/devices/dpdk/ipsec/esp_encrypt.c index 10bb4616..b6f00004 100644 --- a/src/vnet/devices/dpdk/ipsec/esp_encrypt.c +++ b/src/vnet/devices/dpdk/ipsec/esp_encrypt.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #define foreach_esp_encrypt_next \ _(DROP, "error-drop") \ @@ -179,7 +181,14 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (!sa_sess->sess)) { int ret = create_sym_sess (sa0, sa_sess, 1); - ASSERT (ret == 
0); + + if (PREDICT_FALSE (ret)) + { + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } } qp_index = sa_sess->qp_index; diff --git a/src/vnet/devices/dpdk/ipsec/ipsec.c b/src/vnet/devices/dpdk/ipsec/ipsec.c index de253f02..05c17c99 100644 --- a/src/vnet/devices/dpdk/ipsec/ipsec.c +++ b/src/vnet/devices/dpdk/ipsec/ipsec.c @@ -15,24 +15,69 @@ #include #include #include +#include +#include + #include #include #include -#include -#define DPDK_CRYPTO_NB_OBJS 2048 +#define DPDK_CRYPTO_NB_SESS_OBJS 20000 #define DPDK_CRYPTO_CACHE_SIZE 512 #define DPDK_CRYPTO_PRIV_SIZE 128 -#define DPDK_CRYPTO_N_QUEUE_DESC 512 +#define DPDK_CRYPTO_N_QUEUE_DESC 1024 #define DPDK_CRYPTO_NB_COPS (1024 * 4) -/* - * return: - * -1: update failed - * 0: already exist - * 1: mapped - */ static int +add_del_sa_sess (u32 sa_index, u8 is_add) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + u8 skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + crypto_sa_session_t *sa_sess; + u8 is_outbound; + + if (skip_master) + { + skip_master = 0; + continue; + } + + for (is_outbound = 0; is_outbound < 2; is_outbound++) + { + if (is_add) + { + pool_get (cwm->sa_sess_d[is_outbound], sa_sess); + } + else + { + u8 dev_id; + + sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index); + dev_id = cwm->qp_data[sa_sess->qp_index].dev_id; + + if (!sa_sess->sess) + continue; + + if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess)) + { + clib_warning("failed to free session"); + return -1; + } + memset(sa_sess, 0, sizeof(sa_sess[0])); + } + } + } + /* *INDENT-OFF* */ + + return 0; +} + +static void update_qp_data (crypto_worker_main_t * cwm, u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx) { @@ -45,7 +90,7 @@ update_qp_data (crypto_worker_main_t * cwm, if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id && qpd->is_outbound == is_outbound) - return 0; + return; } /* *INDENT-ON* */ @@ -54,13 +99,10 @@ update_qp_data (crypto_worker_main_t * cwm, qpd->dev_id = cdev_id; qpd->qp_id = qp_id; qpd->is_outbound = is_outbound; - - return 1; } /* * return: - * -1: error * 0: already exist * 1: mapped */ @@ -70,7 +112,6 @@ add_mapping (crypto_worker_main_t * cwm, const struct rte_cryptodev_capabilities *cipher_cap, const struct rte_cryptodev_capabilities *auth_cap) { - int mapped; u16 qp_index; uword key = 0, data, *ret; crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; @@ -83,17 +124,12 @@ add_mapping (crypto_worker_main_t * cwm, if (ret) return 0; - mapped = update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index); - if (mapped < 0) - return -1; + update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index); data = (uword) qp_index; + hash_set (cwm->algo_qp_map, key, data); - ret = hash_set (cwm->algo_qp_map, key, data); - if (!ret) - rte_panic ("Failed to insert hash table\n"); - - return mapped; + return 1; } /* @@ -120,19 +156,13 @@ add_cdev_mapping (crypto_worker_main_t * cwm, for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { - int status = 0; - if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) continue; if (check_algo_is_supported (j, NULL) != 0) continue; - status = add_mapping (cwm, cdev_id, qp, is_outbound, i, j); - if (status == 1) - mapped += 1; - if (status < 0) - return status; + mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, j); } } @@ -169,8 +199,33 @@ check_cryptodev_queues () } static clib_error_t * -dpdk_ipsec_init (vlib_main_t * vm) +dpdk_ipsec_check_support 
(ipsec_sa_t * sa) +{ + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + { + if (sa->integ_alg != IPSEC_INTEG_ALG_NONE) + return clib_error_return (0, "unsupported integ-alg %U with " + "crypto-algo aes-gcm-128", + format_ipsec_integ_alg, sa->integ_alg); + sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128; + } + else + { + if (sa->integ_alg == IPSEC_INTEG_ALG_NONE || + sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) + return clib_error_return (0, "unsupported integ-alg %U", + format_ipsec_integ_alg, sa->integ_alg); + } + + return 0; +} + +static uword +dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, + vlib_frame_t * f) { + dpdk_config_main_t *conf = &dpdk_config_main; + ipsec_main_t *im = &ipsec_main; dpdk_crypto_main_t *dcm = &dpdk_crypto_main; vlib_thread_main_t *tm = vlib_get_thread_main (); struct rte_cryptodev_config dev_conf; @@ -180,8 +235,19 @@ dpdk_ipsec_init (vlib_main_t * vm) i32 dev_id, ret; u32 i, skip_master; + if (!conf->cryptodev) + { + clib_warning ("DPDK Cryptodev support is disabled, " + "default to OpenSSL IPsec"); + return 0; + } + if (check_cryptodev_queues () < 0) - return clib_error_return (0, "not enough cryptodevs for ipsec"); + { + conf->cryptodev = 0; + clib_warning ("not enough Cryptodevs, default to OpenSSL IPsec"); + return 0; + } vec_alloc (dcm->workers_main, tm->n_vlib_mains); _vec_len (dcm->workers_main) = tm->n_vlib_mains; @@ -221,24 +287,17 @@ dpdk_ipsec_init (vlib_main_t * vm) { map = hash_create (0, sizeof (crypto_worker_qp_key_t)); if (!map) - return clib_error_return (0, "unable to create hash table " - "for worker %u", - vlib_mains[i]->cpu_index); + { + clib_warning ("unable to create hash table for worker %u", + vlib_mains[i]->cpu_index); + goto error; + } cwm->algo_qp_map = map; } for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp; is_outbound++) - { - int mapped = add_cdev_mapping (cwm, &cdev_info, - dev_id, qp, is_outbound); - if (mapped > 0) - qp++; - - if (mapped < 0) - return clib_error_return (0, - "too many queues for one worker"); - } + qp += add_cdev_mapping (cwm, &cdev_info, dev_id, qp, is_outbound); } if (qp == 0) @@ -246,12 +305,15 @@ dpdk_ipsec_init (vlib_main_t * vm) dev_conf.socket_id = rte_cryptodev_socket_id (dev_id); dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs; - dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_OBJS; + dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS; dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE; ret = rte_cryptodev_configure (dev_id, &dev_conf); if (ret < 0) - return clib_error_return (0, "cryptodev %u config error", dev_id); + { + clib_warning ("cryptodev %u config error", dev_id); + goto error; + } qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) @@ -259,37 +321,64 @@ dpdk_ipsec_init (vlib_main_t * vm) ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, dev_conf.socket_id); if (ret < 0) - return clib_error_return (0, "cryptodev %u qp %u setup error", - dev_id, qp); + { + clib_warning ("cryptodev %u qp %u setup error", dev_id, qp); + goto error; + } } - fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs, - DPDK_CRYPTO_NB_OBJS, DPDK_CRYPTO_CACHE_SIZE); - } + vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id, + CLIB_CACHE_LINE_BYTES); - u32 socket_id = rte_socket_id (); + if (!vec_elt (dcm->cop_pools, dev_conf.socket_id)) + { + u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", + dev_conf.socket_id, 0); + + rmp = rte_crypto_op_pool_create ((char *) pool_name, + 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, + DPDK_CRYPTO_NB_COPS * + (1 + vlib_num_workers ()), + DPDK_CRYPTO_CACHE_SIZE, + DPDK_CRYPTO_PRIV_SIZE, + dev_conf.socket_id); + vec_free (pool_name); + + if (!rmp) + { + clib_warning ("failed to allocate mempool on socket %u", + dev_conf.socket_id); + goto error; + } + vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp; + } - vec_validate_aligned (dcm->cop_pools, socket_id, CLIB_CACHE_LINE_BYTES); + fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs, + DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE); + } - /* pool already exists, nothing to do */ - if (dcm->cop_pools[socket_id]) - return 0; + dpdk_esp_init (); - u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", socket_id, 0); + /* Add new next node and set as default */ + vlib_node_t *node, *next_node; - rmp = rte_crypto_op_pool_create ((char *) pool_name, - RTE_CRYPTO_OP_TYPE_SYMMETRIC, - DPDK_CRYPTO_NB_COPS * - (1 + vlib_num_workers ()), - DPDK_CRYPTO_CACHE_SIZE, - DPDK_CRYPTO_PRIV_SIZE, socket_id); - vec_free (pool_name); + next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt"); + ASSERT (next_node); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec-output-ip4"); + ASSERT (node); + im->esp_encrypt_node_index = next_node->index; + im->esp_encrypt_next_index = + vlib_node_add_next (vm, node->index, next_node->index); - if (!rmp) - return clib_error_return (0, "failed to allocate mempool on socket %u", - socket_id); - dcm->cop_pools[socket_id] = rmp; + next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt"); + ASSERT (next_node); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec-input-ip4"); + ASSERT (node); + im->esp_decrypt_node_index = next_node->index; + im->esp_decrypt_next_index = + vlib_node_add_next (vm, node->index, next_node->index); - dpdk_esp_init (); + im->cb.check_support_cb = dpdk_ipsec_check_support; + im->cb.add_del_sa_sess_cb = add_del_sa_sess; if (vec_len (vlib_mains) == 0) vlib_node_set_state (&vlib_global_main, dpdk_crypto_input_node.index, @@ -299,10 +388,38 @@ dpdk_ipsec_init (vlib_main_t * vm) vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index, VLIB_NODE_STATE_POLLING); + /* TODO cryptodev counters */ + + return 0; + +error: + ; + crypto_worker_main_t *cwm; + struct rte_mempool **mp; + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + hash_free (cwm->algo_qp_map); + + vec_foreach (mp, dcm->cop_pools) + { + if (mp) + rte_mempool_free (mp[0]); + } + /* *INDENT-ON* */ + vec_free (dcm->workers_main); + vec_free (dcm->cop_pools); + return 0; } -VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_init); +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_ipsec_process_node,static) = { + .function = dpdk_ipsec_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "dpdk-ipsec-process", + .process_log2_n_stack_bytes = 17, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/devices/dpdk/ipsec/ipsec.h b/src/vnet/devices/dpdk/ipsec/ipsec.h index e6c7498c..3465b361 100644 --- a/src/vnet/devices/dpdk/ipsec/ipsec.h +++ b/src/vnet/devices/dpdk/ipsec/ipsec.h @@ -167,7 +167,7 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"}, { - .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.auth = + .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"}, { .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = diff --git a/src/vnet/ipsec-gre/interface.c b/src/vnet/ipsec-gre/interface.c index 
56832ee1..3b6e4ac2 100644 --- a/src/vnet/ipsec-gre/interface.c +++ b/src/vnet/ipsec-gre/interface.c @@ -28,13 +28,7 @@ #include #include -#if DPDK_CRYPTO==1 -#include -#define ESP_NODE "dpdk-esp-encrypt" -#else #include -#define ESP_NODE "esp-encrypt" -#endif u8 * format_ipsec_gre_tunnel (u8 * s, va_list * args) @@ -193,7 +187,7 @@ vnet_ipsec_gre_add_del_tunnel (vnet_ipsec_gre_add_del_tunnel_args_t * a, hash_set (igm->tunnel_by_key, key, t - igm->tunnels); slot = vlib_node_add_named_next_with_slot - (vnm->vlib_main, hi->tx_node_index, ESP_NODE, + (vnm->vlib_main, hi->tx_node_index, "esp-encrypt", IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT); ASSERT (slot == IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT); diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c index ee85c402..cfe434ab 100644 --- a/src/vnet/ipsec/ipsec.c +++ b/src/vnet/ipsec/ipsec.c @@ -22,23 +22,7 @@ #include #include - -#if DPDK_CRYPTO==1 -#include -#define ESP_NODE "dpdk-esp-encrypt" -#else #include -#define ESP_NODE "esp-encrypt" -#endif - -#if DPDK_CRYPTO==0 -/* dummy function */ -static int -add_del_sa_sess (u32 sa_index, u8 is_add) -{ - return 0; -} -#endif u32 ipsec_get_sa_index_by_sa_id (u32 sa_id) @@ -449,7 +433,9 @@ ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add) return VNET_API_ERROR_SYSCALL_ERROR_1; /* sa used in policy */ } hash_unset (im->sa_index_by_sa_id, sa->id); - add_del_sa_sess (sa_index, is_add); + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (sa_index, is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; pool_put (im->sad, sa); } else /* create new SA */ @@ -458,7 +444,8 @@ ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add) clib_memcpy (sa, new_sa, sizeof (*sa)); sa_index = sa - im->sad; hash_set (im->sa_index_by_sa_id, sa->id, sa_index); - if (add_del_sa_sess (sa_index, is_add) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (sa_index, is_add) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } return 0; @@ -497,7 +484,8 @@ ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update) if (sa->crypto_key_len + sa->integ_key_len > 0) { - if (add_del_sa_sess (sa_index, 0) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (sa_index, 0) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } @@ -521,6 +509,19 @@ ipsec_rand_seed (void) RAND_seed ((const void *) &seed_data, sizeof (seed_data)); } +static clib_error_t * +ipsec_check_support (ipsec_sa_t * sa) +{ + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + return clib_error_return (0, "unsupported aes-gcm-128 crypto-alg"); + if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) + return clib_error_return (0, "unsupported none integ-alg"); + if (sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) + return clib_error_return (0, "unsupported aes-gcm-128 integ-alg"); + + return 0; +} + static clib_error_t * ipsec_init (vlib_main_t * vm) { @@ -547,14 +548,18 @@ ipsec_init (vlib_main_t * vm) ASSERT (node); im->error_drop_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) ESP_NODE); - + node = vlib_get_node_by_name (vm, (u8 *) "esp-encrypt"); ASSERT (node); im->esp_encrypt_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup"); + node = vlib_get_node_by_name (vm, (u8 *) "esp-decrypt"); ASSERT (node); - im->ip4_lookup_node_index = node->index; + im->esp_decrypt_node_index = node->index; + + im->esp_encrypt_next_index = IPSEC_OUTPUT_NEXT_ESP_ENCRYPT; + im->esp_decrypt_next_index = IPSEC_INPUT_NEXT_ESP_DECRYPT; + + im->cb.check_support_cb = ipsec_check_support; if ((error 
= vlib_call_init_function (vm, ipsec_cli_init))) return error; diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index 32c7edfc..6726dba0 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -17,6 +17,33 @@ #define IPSEC_FLAG_IPSEC_GRE_TUNNEL (1 << 0) + +#define foreach_ipsec_output_next \ +_(DROP, "error-drop") \ +_(ESP_ENCRYPT, "esp-encrypt") + +#define _(v, s) IPSEC_OUTPUT_NEXT_##v, +typedef enum +{ + foreach_ipsec_output_next +#undef _ + IPSEC_OUTPUT_N_NEXT, +} ipsec_output_next_t; + + +#define foreach_ipsec_input_next \ +_(DROP, "error-drop") \ +_(ESP_DECRYPT, "esp-decrypt") + +#define _(v, s) IPSEC_INPUT_NEXT_##v, +typedef enum +{ + foreach_ipsec_input_next +#undef _ + IPSEC_INPUT_N_NEXT, +} ipsec_input_next_t; + + #define foreach_ipsec_policy_action \ _(0, BYPASS, "bypass") \ _(1, DISCARD, "discard") \ @@ -31,20 +58,12 @@ typedef enum IPSEC_POLICY_N_ACTION, } ipsec_policy_action_t; -#if DPDK_CRYPTO==1 #define foreach_ipsec_crypto_alg \ _(0, NONE, "none") \ _(1, AES_CBC_128, "aes-cbc-128") \ _(2, AES_CBC_192, "aes-cbc-192") \ _(3, AES_CBC_256, "aes-cbc-256") \ _(4, AES_GCM_128, "aes-gcm-128") -#else -#define foreach_ipsec_crypto_alg \ - _(0, NONE, "none") \ - _(1, AES_CBC_128, "aes-cbc-128") \ - _(2, AES_CBC_192, "aes-cbc-192") \ - _(3, AES_CBC_256, "aes-cbc-256") -#endif typedef enum { @@ -54,7 +73,6 @@ typedef enum IPSEC_CRYPTO_N_ALG, } ipsec_crypto_alg_t; -#if DPDK_CRYPTO==1 #define foreach_ipsec_integ_alg \ _(0, NONE, "none") \ _(1, MD5_96, "md5-96") /* RFC2403 */ \ @@ -63,17 +81,7 @@ typedef enum _(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \ _(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \ _(6, SHA_512_256, "sha-512-256") /* RFC4868 */ \ - _(7, AES_GCM_128, "aes-gcm-128") -#else -#define foreach_ipsec_integ_alg \ - _(0, NONE, "none") \ - _(1, MD5_96, "md5-96") /* RFC2403 */ \ - _(2, SHA1_96, "sha1-96") /* RFC2404 */ \ - _(3, SHA_256_96, "sha-256-96") /* draft-ietf-ipsec-ciph-sha-256-00 */ \ - _(4, SHA_256_128, "sha-256-128") /* RFC4868 */ \ - _(5, SHA_384_192, "sha-384-192") /* RFC4868 */ \ - _(6, SHA_512_256, "sha-512-256") /* RFC4868 */ -#endif + _(7, AES_GCM_128, "aes-gcm-128") /* RFC4106 */ typedef enum { @@ -223,6 +231,12 @@ typedef struct u32 hw_if_index; } ipsec_tunnel_if_t; +typedef struct +{ + i32 (*add_del_sa_sess_cb) (u32 sa_index, u8 is_add); + clib_error_t *(*check_support_cb) (ipsec_sa_t * sa); +} ipsec_main_callbacks_t; + typedef struct { /* pool of tunnel instances */ @@ -250,11 +264,16 @@ typedef struct uword *sa_index_by_sa_id; uword *ipsec_if_pool_index_by_key; - /* node indexes */ + /* node indeces */ u32 error_drop_node_index; - u32 ip4_lookup_node_index; u32 esp_encrypt_node_index; + u32 esp_decrypt_node_index; + /* next node indeces */ + u32 esp_encrypt_next_index; + u32 esp_decrypt_next_index; + /* callbacks */ + ipsec_main_callbacks_t cb; } ipsec_main_t; ipsec_main_t ipsec_main; diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c index 9bcf63b4..30732266 100644 --- a/src/vnet/ipsec/ipsec_api.c +++ b/src/vnet/ipsec/ipsec_api.c @@ -177,6 +177,7 @@ static void vl_api_ipsec_sad_add_del_entry_t_handler vl_api_ipsec_sad_add_del_entry_reply_t *rmp; int rv; #if WITH_LIBSSL > 0 + ipsec_main_t *im = &ipsec_main; ipsec_sa_t sa; memset (&sa, 0, sizeof (sa)); @@ -204,11 +205,7 @@ static void vl_api_ipsec_sad_add_del_entry_t_handler sa.crypto_key_len = mp->crypto_key_length; clib_memcpy (&sa.crypto_key, mp->crypto_key, sizeof (sa.crypto_key)); /* check for unsupported integ-alg */ -#if DPDK_CRYPTO==1 if 
(mp->integrity_algorithm < IPSEC_INTEG_ALG_NONE || -#else - if (mp->integrity_algorithm < IPSEC_INTEG_ALG_SHA1_96 || -#endif mp->integrity_algorithm >= IPSEC_INTEG_N_ALG) { clib_warning ("unsupported integ-alg: '%U'", format_ipsec_integ_alg, @@ -217,35 +214,6 @@ static void vl_api_ipsec_sad_add_del_entry_t_handler goto out; } -#if DPDK_CRYPTO==1 - /*Special cases, aes-gcm-128 encryption */ - if (mp->crypto_algorithm == IPSEC_CRYPTO_ALG_AES_GCM_128) - { - if (mp->integrity_algorithm != IPSEC_INTEG_ALG_NONE - && mp->integrity_algorithm != IPSEC_INTEG_ALG_AES_GCM_128) - { - clib_warning - ("unsupported: aes-gcm-128 crypto-alg needs none as integ-alg"); - rv = VNET_API_ERROR_UNIMPLEMENTED; - goto out; - } - else /*set integ-alg internally to aes-gcm-128 */ - mp->integrity_algorithm = IPSEC_INTEG_ALG_AES_GCM_128; - } - else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_AES_GCM_128) - { - clib_warning ("unsupported integ-alg: aes-gcm-128"); - rv = VNET_API_ERROR_UNIMPLEMENTED; - goto out; - } - else if (mp->integrity_algorithm == IPSEC_INTEG_ALG_NONE) - { - clib_warning ("unsupported integ-alg: none"); - rv = VNET_API_ERROR_UNIMPLEMENTED; - goto out; - } -#endif - sa.integ_alg = mp->integrity_algorithm; sa.integ_key_len = mp->integrity_key_length; clib_memcpy (&sa.integ_key, mp->integrity_key, sizeof (sa.integ_key)); @@ -263,6 +231,15 @@ static void vl_api_ipsec_sad_add_del_entry_t_handler clib_memcpy (&sa.tunnel_dst_addr.ip4.data, mp->tunnel_dst_address, 4); } + ASSERT (im->cb.check_support_cb); + clib_error_t *err = im->cb.check_support_cb (&sa); + if (err) + { + clib_warning ("%s", err->what); + rv = VNET_API_ERROR_UNIMPLEMENTED; + goto out; + } + rv = ipsec_add_del_sa (vm, &sa, mp->is_add); #else rv = VNET_API_ERROR_UNIMPLEMENTED; diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c index 7ab85d4a..3c1e26f2 100644 --- a/src/vnet/ipsec/ipsec_cli.c +++ b/src/vnet/ipsec/ipsec_cli.c @@ -67,10 +67,12 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { + ipsec_main_t *im = &ipsec_main; unformat_input_t _line_input, *line_input = &_line_input; ipsec_sa_t sa; int is_add = ~0; u8 *ck = 0, *ik = 0; + clib_error_t *err = 0; memset (&sa, 0, sizeof (sa)); @@ -109,11 +111,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, else if (unformat (line_input, "integ-alg %U", unformat_ipsec_integ_alg, &sa.integ_alg)) { -#if DPDK_CRYPTO==1 - if (sa.integ_alg < IPSEC_INTEG_ALG_NONE || -#else if (sa.integ_alg < IPSEC_INTEG_ALG_SHA1_96 || -#endif sa.integ_alg >= IPSEC_INTEG_N_ALG) return clib_error_return (0, "unsupported integ-alg: '%U'", format_ipsec_integ_alg, sa.integ_alg); @@ -141,23 +139,6 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, format_unformat_error, line_input); } -#if DPDK_CRYPTO==1 - /*Special cases, aes-gcm-128 encryption */ - if (sa.crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) - { - if (sa.integ_alg != IPSEC_INTEG_ALG_NONE - && sa.integ_alg != IPSEC_INTEG_ALG_AES_GCM_128) - return clib_error_return (0, - "unsupported: aes-gcm-128 crypto-alg needs none as integ-alg"); - else /*set integ-alg internally to aes-gcm-128 */ - sa.integ_alg = IPSEC_INTEG_ALG_AES_GCM_128; - } - else if (sa.integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) - return clib_error_return (0, "unsupported integ-alg: aes-gcm-128"); - else if (sa.integ_alg == IPSEC_INTEG_ALG_NONE) - return clib_error_return (0, "unsupported integ-alg: none"); -#endif - unformat_free (line_input); if (sa.crypto_key_len > sizeof (sa.crypto_key)) @@ -172,6 +153,14 @@ 
ipsec_sa_add_del_command_fn (vlib_main_t * vm, if (ik) strncpy ((char *) sa.integ_key, (char *) ik, sa.integ_key_len); + if (is_add) + { + ASSERT (im->cb.check_support_cb); + err = im->cb.check_support_cb (&sa); + if (err) + return err; + } + ipsec_add_del_sa (vm, &sa, is_add); return 0; diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c index a8da046f..ca6b0092 100644 --- a/src/vnet/ipsec/ipsec_if.c +++ b/src/vnet/ipsec/ipsec_if.c @@ -20,20 +20,7 @@ #include #include -#if DPDK_CRYPTO==1 -#include -#else #include -#endif - -#if DPDK_CRYPTO==0 -/* dummy function */ -static int -add_del_sa_sess (u32 sa_index, u8 is_add) -{ - return 0; -} -#endif void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length); @@ -52,6 +39,39 @@ dummy_interface_tx (vlib_main_t * vm, return frame->n_vectors; } +static clib_error_t * +ipsec_admin_up_down_function (vnet_main_t * vnm, u32 hw_if_index, u32 flags) +{ + ipsec_main_t *im = &ipsec_main; + clib_error_t *err = 0; + ipsec_tunnel_if_t *t; + vnet_hw_interface_t *hi; + ipsec_sa_t *sa; + + hi = vnet_get_hw_interface (vnm, hw_if_index); + if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) + { + t = pool_elt_at_index (im->tunnel_interfaces, hi->hw_instance); + ASSERT (im->cb.check_support_cb); + sa = pool_elt_at_index (im->sad, t->input_sa_index); + err = im->cb.check_support_cb (sa); + if (err) + return err; + + sa = pool_elt_at_index (im->sad, t->output_sa_index); + err = im->cb.check_support_cb (sa); + if (err) + return err; + + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, + VNET_HW_INTERFACE_FLAG_LINK_UP); + } + else + vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0 /* down */ ); + + return /* no error */ 0; +} + /* *INDENT-OFF* */ VNET_DEVICE_CLASS (ipsec_device_class, static) = { @@ -59,6 +79,7 @@ VNET_DEVICE_CLASS (ipsec_device_class, static) = .format_device_name = format_ipsec_name, .format_tx_trace = format_ipsec_if_output_trace, .tx_function = dummy_interface_tx, + .admin_up_down_function = ipsec_admin_up_down_function, }; /* *INDENT-ON* */ @@ -138,7 +159,9 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, args->remote_crypto_key_len); } - add_del_sa_sess (t->input_sa_index, args->is_add); + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->input_sa_index, args->is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; pool_get (im->sad, sa); memset (sa, 0, sizeof (*sa)); @@ -165,7 +188,9 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, args->local_crypto_key_len); } - add_del_sa_sess (t->output_sa_index, args->is_add); + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->output_sa_index, args->is_add) < 0) + return VNET_API_ERROR_SYSCALL_ERROR_1; hash_set (im->ipsec_if_pool_index_by_key, key, t - im->tunnel_interfaces); @@ -211,14 +236,16 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, /* delete input and output SA */ sa = pool_elt_at_index (im->sad, t->input_sa_index); - if (add_del_sa_sess (t->input_sa_index, args->is_add) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->input_sa_index, args->is_add) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; pool_put (im->sad, sa); sa = pool_elt_at_index (im->sad, t->output_sa_index); - if (add_del_sa_sess (t->output_sa_index, args->is_add) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->output_sa_index, args->is_add) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; pool_put (im->sad, sa); @@ -310,7 +337,8 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->crypto_key_len = 
vec_len (key); clib_memcpy (sa->crypto_key, key, vec_len (key)); - if (add_del_sa_sess (t->input_sa_index, 0) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->output_sa_index, 0) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_LOCAL_INTEG) @@ -320,7 +348,8 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->integ_key_len = vec_len (key); clib_memcpy (sa->integ_key, key, vec_len (key)); - if (add_del_sa_sess (t->output_sa_index, 0) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->output_sa_index, 0) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_CRYPTO) @@ -330,7 +359,8 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->crypto_key_len = vec_len (key); clib_memcpy (sa->crypto_key, key, vec_len (key)); - if (add_del_sa_sess (t->input_sa_index, 0) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->input_sa_index, 0) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } else if (type == IPSEC_IF_SET_KEY_TYPE_REMOTE_INTEG) @@ -340,7 +370,8 @@ ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, sa->integ_key_len = vec_len (key); clib_memcpy (sa->integ_key, key, vec_len (key)); - if (add_del_sa_sess (t->output_sa_index, 0) < 0) + if (im->cb.add_del_sa_sess_cb && + im->cb.add_del_sa_sess_cb (t->input_sa_index, 0) < 0) return VNET_API_ERROR_SYSCALL_ERROR_1; } else diff --git a/src/vnet/ipsec/ipsec_if_in.c b/src/vnet/ipsec/ipsec_if_in.c index db75ab92..bd2a9f78 100644 --- a/src/vnet/ipsec/ipsec_if_in.c +++ b/src/vnet/ipsec/ipsec_if_in.c @@ -22,12 +22,6 @@ #include #include -#if DPDK_CRYPTO==1 -#define ESP_NODE "dpdk-esp-decrypt" -#else -#define ESP_NODE "esp-decrypt" -#endif - /* Statistics (not really errors) */ #define foreach_ipsec_if_input_error \ _(RX, "good packets received") @@ -46,12 +40,6 @@ typedef enum IPSEC_IF_INPUT_N_ERROR, } ipsec_if_input_error_t; -typedef enum -{ - IPSEC_IF_INPUT_NEXT_ESP_DECRYPT, - IPSEC_IF_INPUT_NEXT_DROP, - IPSEC_IF_INPUT_N_NEXT, -} ipsec_if_input_next_t; typedef struct { @@ -59,7 +47,6 @@ typedef struct u32 seq; } ipsec_if_input_trace_t; - u8 * format_ipsec_if_input_trace (u8 * s, va_list * args) { @@ -106,7 +93,7 @@ ipsec_if_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, ip0 = vlib_buffer_get_current (b0); esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0)); - next0 = IPSEC_IF_INPUT_NEXT_DROP; + next0 = IPSEC_INPUT_NEXT_DROP; u64 key = (u64) ip0->src_address.as_u32 << 32 | (u64) clib_net_to_host_u32 (esp0->spi); @@ -121,7 +108,7 @@ ipsec_if_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vnet_buffer (b0)->ipsec.flags = t->hw_if_index == ~0 ? 
IPSEC_FLAG_IPSEC_GRE_TUNNEL : 0; vlib_buffer_advance (b0, ip4_header_bytes (ip0)); - next0 = IPSEC_IF_INPUT_NEXT_ESP_DECRYPT; + next0 = im->esp_decrypt_next_index; } if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -156,12 +143,7 @@ VLIB_REGISTER_NODE (ipsec_if_input_node) = { .n_errors = ARRAY_LEN(ipsec_if_input_error_strings), .error_strings = ipsec_if_input_error_strings, - .n_next_nodes = IPSEC_IF_INPUT_N_NEXT, - - .next_nodes = { - [IPSEC_IF_INPUT_NEXT_ESP_DECRYPT] = ESP_NODE, - [IPSEC_IF_INPUT_NEXT_DROP] = "error-drop", - }, + .sibling_of = "ipsec-input-ip4", }; /* *INDENT-ON* */ diff --git a/src/vnet/ipsec/ipsec_if_out.c b/src/vnet/ipsec/ipsec_if_out.c index 8f062828..62ff67ac 100644 --- a/src/vnet/ipsec/ipsec_if_out.c +++ b/src/vnet/ipsec/ipsec_if_out.c @@ -21,12 +21,6 @@ #include -#if DPDK_CRYPTO==1 -#define ESP_NODE "dpdk-esp-encrypt" -#else -#define ESP_NODE "esp-encrypt" -#endif - /* Statistics (not really errors) */ #define foreach_ipsec_if_output_error \ _(TX, "good packets transmitted") @@ -45,12 +39,6 @@ typedef enum IPSEC_IF_OUTPUT_N_ERROR, } ipsec_if_output_error_t; -typedef enum -{ - IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT, - IPSEC_IF_OUTPUT_NEXT_DROP, - IPSEC_IF_OUTPUT_N_NEXT, -} ipsec_if_output_next_t; typedef struct { @@ -58,7 +46,6 @@ typedef struct u32 seq; } ipsec_if_output_trace_t; - u8 * format_ipsec_if_output_trace (u8 * s, va_list * args) { @@ -106,7 +93,7 @@ ipsec_if_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0); t0 = pool_elt_at_index (im->tunnel_interfaces, hi0->dev_instance); vnet_buffer (b0)->ipsec.sad_index = t0->output_sa_index; - next0 = IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT; + next0 = im->esp_encrypt_next_index; if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -142,12 +129,7 @@ VLIB_REGISTER_NODE (ipsec_if_output_node) = { .n_errors = ARRAY_LEN(ipsec_if_output_error_strings), .error_strings = ipsec_if_output_error_strings, - .n_next_nodes = IPSEC_IF_OUTPUT_N_NEXT, - - .next_nodes = { - [IPSEC_IF_OUTPUT_NEXT_ESP_ENCRYPT] = ESP_NODE, - [IPSEC_IF_OUTPUT_NEXT_DROP] = "error-drop", - }, + .sibling_of = "ipsec-output-ip4", }; /* *INDENT-ON* */ diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c index 4662c1a1..deaa7b7b 100644 --- a/src/vnet/ipsec/ipsec_input.c +++ b/src/vnet/ipsec/ipsec_input.c @@ -23,30 +23,10 @@ #include #include -#if DPDK_CRYPTO==1 -#define ESP_NODE "dpdk-esp-decrypt" -#else -#define ESP_NODE "esp-decrypt" -#endif - -#define foreach_ipsec_input_next \ -_(DROP, "error-drop") \ -_(ESP_DECRYPT, ESP_NODE) - -#define _(v, s) IPSEC_INPUT_NEXT_##v, -typedef enum -{ - foreach_ipsec_input_next -#undef _ - IPSEC_INPUT_N_NEXT, -} ipsec_input_next_t; - - #define foreach_ipsec_input_error \ _(RX_PKTS, "IPSEC pkts received") \ _(DECRYPTION_FAILED, "IPSEC decryption failed") - typedef enum { #define _(sym,str) IPSEC_INPUT_ERROR_##sym, @@ -262,7 +242,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm, p0->counter.bytes += clib_net_to_host_u16 (ip0->length); vnet_buffer (b0)->ipsec.sad_index = p0->sa_index; vnet_buffer (b0)->ipsec.flags = 0; - next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT; + next0 = im->esp_decrypt_next_index; vlib_buffer_advance (b0, ip4_header_bytes (ip0)); goto trace0; } @@ -392,7 +372,7 @@ VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip4_node, ipsec_input_ip4_node_fn) p0->counter.bytes += header_size; vnet_buffer (b0)->ipsec.sad_index = p0->sa_index; vnet_buffer (b0)->ipsec.flags = 0; - next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT; + next0 = 
im->esp_decrypt_next_index; vlib_buffer_advance (b0, header_size); goto trace0; } diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c index df93b5e4..1b8070d6 100644 --- a/src/vnet/ipsec/ipsec_output.c +++ b/src/vnet/ipsec/ipsec_output.c @@ -21,27 +21,8 @@ #include -#if DPDK_CRYPTO==1 -#define ESP_NODE "dpdk-esp-encrypt" -#else -#define ESP_NODE "esp-encrypt" -#endif - #if WITH_LIBSSL > 0 -#define foreach_ipsec_output_next \ -_(DROP, "error-drop") \ -_(ESP_ENCRYPT, ESP_NODE) - -#define _(v, s) IPSEC_OUTPUT_NEXT_##v, -typedef enum -{ - foreach_ipsec_output_next -#undef _ - IPSEC_OUTPUT_N_NEXT, -} ipsec_output_next_t; - - #define foreach_ipsec_output_error \ _(RX_PKTS, "IPSec pkts received") \ _(POLICY_DISCARD, "IPSec policy discard") \ @@ -50,7 +31,6 @@ typedef enum _(POLICY_BYPASS, "IPSec policy bypass") \ _(ENCAPS_FAILED, "IPSec encapsulation failed") - typedef enum { #define _(sym,str) IPSEC_OUTPUT_ERROR_##sym, -- cgit 1.2.3-korg From 676112f81e52cd91c88a7ab985e957bd64baafd0 Mon Sep 17 00:00:00 2001 From: Steve Shin Date: Mon, 30 Jan 2017 13:27:26 -0800 Subject: Bump up PKG_SUFFIX to vpp3 Change-Id: Ib5e95d5e5a3da5fb395b26177f7dd17e90afd69d Signed-off-by: Steve Shin --- dpdk/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 22e97878..02b5b210 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -27,7 +27,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.11 -PKG_SUFFIX ?= vpp2 +PKG_SUFFIX ?= vpp3 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -- cgit 1.2.3-korg From 460bc633b131d2001bb81d7c2ad5a51ec177ac93 Mon Sep 17 00:00:00 2001 From: Steve Shin Date: Tue, 31 Jan 2017 13:38:08 -0800 Subject: ENIC driver patch to fix MAC address add and remove The mac_addr_add callback function was simply replacing the primary MAC address instead of adding new ones and the mac_addr_remove callback would only remove the primary MAC form the adapter. Fix the functions to add or remove new address. Allow up to 64 MAC addresses per port. 
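
A minimal, self-contained model of the per-slot semantics described above, using hypothetical names rather than the real enic/rte_ethdev types (the actual driver change is in the patch below): the add path programs the address into the slot chosen by the caller, and the remove path is handed a slot index instead of always clearing the primary (slot 0) entry.

/*
 * Hypothetical sketch of the per-index MAC table bookkeeping; it models
 * only the slot handling, not the NIC filter programming.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAC_ADDR_LEN 6
#define MAX_MAC_ADDR 64                 /* mirrors ENIC_MAX_MAC_ADDR */

struct mac_table {
        uint8_t addr[MAX_MAC_ADDR][MAC_ADDR_LEN];
        uint8_t in_use[MAX_MAC_ADDR];
};

/* "add" fills the slot chosen by the caller instead of overwriting slot 0 */
static int
mac_add(struct mac_table *t, uint32_t index, const uint8_t *mac)
{
        if (index >= MAX_MAC_ADDR)
                return -1;
        memcpy(t->addr[index], mac, MAC_ADDR_LEN);
        t->in_use[index] = 1;           /* a driver would program the HW filter here */
        return 0;
}

/* "remove" receives the slot index, so secondary addresses can be deleted too */
static int
mac_del(struct mac_table *t, uint32_t index)
{
        if (index >= MAX_MAC_ADDR || !t->in_use[index])
                return -1;
        memset(t->addr[index], 0, MAC_ADDR_LEN);
        t->in_use[index] = 0;
        return 0;
}

int
main(void)
{
        struct mac_table t = { 0 };
        const uint8_t primary[MAC_ADDR_LEN]   = { 0x02, 0, 0, 0, 0, 0x01 };
        const uint8_t secondary[MAC_ADDR_LEN] = { 0x02, 0, 0, 0, 0, 0x02 };

        mac_add(&t, 0, primary);
        mac_add(&t, 1, secondary);
        mac_del(&t, 1);                 /* slot 0 (the primary) is left untouched */
        printf("slot0 in use: %u, slot1 in use: %u\n", t.in_use[0], t.in_use[1]);
        return 0;
}
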
Change-Id: Ieff396ae27505c4c09f028911eff907757b03c7d Signed-off-by: Steve Shin --- dpdk/Makefile | 2 +- .../0003-enic-fix-MAC-address-add-and-remove.patch | 122 +++++++++++++++++++++ 2 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 02b5b210..00d606d2 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -27,7 +27,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.11 -PKG_SUFFIX ?= vpp3 +PKG_SUFFIX ?= vpp4 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) diff --git a/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch b/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch new file mode 100644 index 00000000..e2965676 --- /dev/null +++ b/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch @@ -0,0 +1,122 @@ +From 0cd0ed7b0b966704236e07fc1d3bd099deb407a7 Mon Sep 17 00:00:00 2001 +From: John Daley +Date: Tue, 31 Jan 2017 12:59:23 -0800 +Subject: [PATCH] The mac_addr_add callback function was simply replacing the + primary MAC address instead of adding new ones and the mac_addr_remove + callback would only remove the primary MAC form the adapter. Fix the + functions to add or remove new address. Allow up to 64 MAC addresses per + port. + +Signed-off-by: John Daley +--- + drivers/net/enic/enic.h | 5 +++-- + drivers/net/enic/enic_ethdev.c | 6 +++--- + drivers/net/enic/enic_main.c | 21 ++++++++------------- + 3 files changed, 14 insertions(+), 18 deletions(-) + +diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h +index 865cd76..5a807d4 100644 +--- a/drivers/net/enic/enic.h ++++ b/drivers/net/enic/enic.h +@@ -60,6 +60,7 @@ + #define ENIC_RQ_MAX 16 + #define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2)) + #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) ++#define ENIC_MAX_MAC_ADDR 64 + + #define VLAN_ETH_HLEN 18 + +@@ -277,8 +278,8 @@ extern void enic_dev_stats_get(struct enic *enic, + struct rte_eth_stats *r_stats); + extern void enic_dev_stats_clear(struct enic *enic); + extern void enic_add_packet_filter(struct enic *enic); +-extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); +-extern void enic_del_mac_address(struct enic *enic); ++void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); ++void enic_del_mac_address(struct enic *enic, int mac_index); + extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); + extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, +diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c +index 2b154ec..d2d04a9 100644 +--- a/drivers/net/enic/enic_ethdev.c ++++ b/drivers/net/enic/enic_ethdev.c +@@ -464,7 +464,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, + device_info->max_tx_queues = enic->conf_wq_count; + device_info->min_rx_bufsize = ENIC_MIN_MTU; + device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4; +- device_info->max_mac_addrs = 1; ++ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR; + device_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | +@@ -545,12 +545,12 @@ static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, + enic_set_mac_address(enic, mac_addr->addr_bytes); + } + +-static void enicpmd_remove_mac_addr(struct 
rte_eth_dev *eth_dev, __rte_unused uint32_t index) ++static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) + { + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); +- enic_del_mac_address(enic); ++ enic_del_mac_address(enic, index); + } + + static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c +index f0b15ac..21e8ede 100644 +--- a/drivers/net/enic/enic_main.c ++++ b/drivers/net/enic/enic_main.c +@@ -190,9 +190,12 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) + r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); + } + +-void enic_del_mac_address(struct enic *enic) ++void enic_del_mac_address(struct enic *enic, int mac_index) + { +- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr)) ++ struct rte_eth_dev *eth_dev = enic->rte_dev; ++ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes; ++ ++ if (vnic_dev_del_addr(enic->vdev, mac_addr)) + dev_err(enic, "del mac addr failed\n"); + } + +@@ -205,15 +208,6 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) + return; + } + +- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr); +- if (err) { +- dev_err(enic, "del mac addr failed\n"); +- return; +- } +- +- ether_addr_copy((struct ether_addr *)mac_addr, +- (struct ether_addr *)enic->mac_addr); +- + err = vnic_dev_add_addr(enic->vdev, mac_addr); + if (err) { + dev_err(enic, "add mac addr failed\n"); +@@ -1308,13 +1302,14 @@ static int enic_dev_init(struct enic *enic) + /* Get the supported filters */ + enic_fdir_info(enic); + +- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0); ++ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN ++ * ENIC_MAX_MAC_ADDR, 0); + if (!eth_dev->data->mac_addrs) { + dev_err(enic, "mac addr storage alloc failed, aborting.\n"); + return -1; + } + ether_addr_copy((struct ether_addr *) enic->mac_addr, +- ð_dev->data->mac_addrs[0]); ++ eth_dev->data->mac_addrs); + + vnic_dev_set_reset_flag(enic->vdev, 0); + +-- +1.9.1 + -- cgit 1.2.3-korg From 78edb8e722fe2bb72ba7f8e63b420733c7fb51c6 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 6 Feb 2017 22:28:52 +0100 Subject: Multiple platofrm support for dpdk/Makefile, fix optimizations Using -march=native was causing SIGILLs on Atoms. 
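
To make the SIGILL failure mode concrete: a binary built with -march=native on a recent build host may contain AVX or AVX2 instructions that an Atom-class CPU does not implement, and executing one of them raises SIGILL. The GCC-specific, x86-only probe below (an illustration only, not part of this build change) reports at run time which of those extensions the current CPU supports; pinning the DPDK machine and tune settings to a fixed baseline avoids depending on the build host's CPU.

/*
 * Illustration of why -march=native is risky: query at run time which
 * ISA extensions the executing CPU actually implements.
 */
#include <stdio.h>

int
main(void)
{
        __builtin_cpu_init();   /* initialise GCC's CPU feature cache */
        printf("sse4.2: %s\n", __builtin_cpu_supports("sse4.2") ? "yes" : "no");
        printf("avx:    %s\n", __builtin_cpu_supports("avx") ? "yes" : "no");
        printf("avx2:   %s\n", __builtin_cpu_supports("avx2") ? "yes" : "no");
        return 0;
}
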
Change-Id: I98c7fdaa139e3db70c972950dc9c167bf5803656 Signed-off-by: Damjan Marion --- build-data/packages/dpdk.mk | 12 ---------- build-data/platforms/thunder.mk | 36 ---------------------------- build-data/platforms/vpp.mk | 3 --- dpdk/Makefile | 52 ++++++++++++++++++++--------------------- 4 files changed, 25 insertions(+), 78 deletions(-) delete mode 100644 build-data/platforms/thunder.mk (limited to 'dpdk/Makefile') diff --git a/build-data/packages/dpdk.mk b/build-data/packages/dpdk.mk index 6938392c..ed89bb1b 100644 --- a/build-data/packages/dpdk.mk +++ b/build-data/packages/dpdk.mk @@ -1,14 +1,4 @@ -DPDK_MARCH = $(strip $($(PLATFORM)_dpdk_arch)) -ifeq ($(DPDK_MARCH),) - DPDK_MARCH="native" -endif - -DPDK_TUNE = $(strip $($(PLATFORM)_mtune)) -ifeq ($(DPDK_TUNE),) - DPDK_TUNE="generic" -endif - ifneq (,$(findstring debug,$(TAG))) DPDK_DEBUG=y else @@ -18,8 +8,6 @@ endif DPDK_MAKE_ARGS = -C $(call find_source_fn,$(PACKAGE_SOURCE)) \ DPDK_BUILD_DIR=$(PACKAGE_BUILD_DIR) \ DPDK_INSTALL_DIR=$(PACKAGE_INSTALL_DIR) \ - DPDK_MARCH=$(DPDK_MARCH) \ - DPDK_TUNE=$(DPDK_TUNE) \ DPDK_DEBUG=$(DPDK_DEBUG) DPDK_CRYPTO_SW_PMD=$(strip $($(PLATFORM)_uses_dpdk_cryptodev_sw)) diff --git a/build-data/platforms/thunder.mk b/build-data/platforms/thunder.mk deleted file mode 100644 index 31b6a510..00000000 --- a/build-data/platforms/thunder.mk +++ /dev/null @@ -1,36 +0,0 @@ -# Override OS so we can use the sdk toolchain instead of building one -thunder_os = thunderx-linux-gnu - -# Override CROSS_LDFLAGS so we can use -# /lib/aarch64-linux-gnu/ld-linux-aarch64.so.1 instead of building glibc -thunder_cross_ldflags = \ - -Wl,--dynamic-linker=/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1 \ - -Wl,-rpath -Wl,$(lots_of_slashes_to_pad_names)$(TOOL_INSTALL_LIB_DIR) - -thunder_arch = aarch64 -# suppress -march=foo, the cross compiler doesn't understand it -thunder_march = " " - -thunder_root_packages = vppinfra vlib-cavium-dpdk vnet-cavium-dpdk cavium-dpdk \ - vpp-cavium-dpdk vpp-api-test-cavium-dpdk - -vnet-cavium-dpdk_configure_args_thunder = \ - --with-dpdk --without-libssl - -vpp-cavium-dpdk_configure_args_thunder = \ - --with-dpdk --without-libssl - -cavium-dpdk_configure_args_thunder = --with-headroom=256 - -vlib-cavium-dpdk_configure_args_thunder = --with-pre-data=128 - -# native tool chain additions for this platform -thunder_native_tools = vppapigen - -thunder_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -thunder_debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG - -thunder_TAG_CFLAGS = -g -O2 -thunder_TAG_LDFLAGS = -g -O2 - - diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index ee307639..401a383a 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -16,17 +16,14 @@ vpp_arch = native ifeq ($(shell uname -m),x86_64) vpp_march = corei7 # Nehalem Instruction set vpp_mtune = corei7-avx # Optimize for Sandy Bridge -vpp_dpdk_arch = corei7 else ifeq ($(shell uname -m),aarch64) ifeq ($(TARGET_PLATFORM),thunderx) vpp_march = armv8-a+crc vpp_mtune = thunderx -vpp_dpdk_arch = armv8a vpp_dpdk_target = arm64-thunderx-linuxapp-gcc else vpp_march = native vpp_mtune = generic -vpp_dpdk_arch = native endif endif vpp_native_tools = vppapigen diff --git a/dpdk/Makefile b/dpdk/Makefile index 00d606d2..c9ed8730 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -18,8 +18,6 @@ DPDK_BUILD_DIR ?= $(CURDIR)/_build DPDK_INSTALL_DIR ?= $(CURDIR)/_install DPDK_PKTMBUF_HEADROOM ?= 128 DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads -DPDK_MARCH ?= native -DPDK_TUNE ?= generic DPDK_DEBUG ?= n DPDK_CRYPTO_SW_PMD ?= n 
DPDK_MLX5_PMD ?= n @@ -27,7 +25,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 16.11 -PKG_SUFFIX ?= vpp4 +PKG_SUFFIX ?= vpp5 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -37,20 +35,40 @@ DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifneq (,$(findstring clang,$(CC))) DPDK_CC=clang +else ifneq (,$(findstring icc,$(CC))) +DPDK_CC=icc else DPDK_CC=gcc endif +############################################################################## +# Intel x86_64 +############################################################################## +ifeq ($(shell uname -m),x86_64) +DPDK_TARGET ?= x86_64-native-linuxapp-$(DPDK_CC) +DPDK_MACHINE ?= nhm +DPDK_TUNE ?= core-avx2 + +############################################################################## +# Cavium ThunderX +############################################################################## +else ifneq (,$(findstring thunder,$(shell cat /sys/bus/pci/devices/0000:00:01.0/uevent | grep cavium))) +export CROSS="" +DPDK_TARGET ?= arm64-thunderx-linuxapp-$(DPDK_CC) +DPDK_MACHINE ?= thunderx +DPDK_TUNE ?= generic -ifeq (,$(DPDK_TARGET)) -DPDK_TARGET := x86_64-native-linuxapp-$(DPDK_CC) +############################################################################## +# Unknown platofrm +############################################################################## +else +$(error unknown platform) endif JOBS := $(shell grep processor /proc/cpuinfo | wc -l) # compiler/linker custom arguments DPDK_CPU_CFLAGS := -pie -fPIC -DPDK_CPU_LDFLAGS := DPDK_EXTRA_LDFLAGS := -g ifeq ($(DPDK_DEBUG),n) @@ -59,25 +77,6 @@ else DPDK_EXTRA_CFLAGS := -g -O0 endif -# translate gcc march values to DPDK arch -ifeq ($(DPDK_MARCH),native) -DPDK_MACHINE:=native # autodetect host CPU -else ifeq ($(DPDK_MARCH),corei7) -DPDK_MACHINE:=nhm # Nehalem / Westmere -else ifeq ($(DPDK_MARCH),corei7-avx) -DPDK_MACHINE:=snb # Sandy Bridge -else ifeq ($(DPDK_MARCH),core-avx-i) -DPDK_MACHINE:=ivb # Ivy Bridge -else ifeq ($(DPDK_MARCH),core-avx2) -DPDK_MACHINE:=hsw # Haswell -else ifeq ($(DPDK_MARCH),armv7a) -DPDK_MACHINE:=armv7a # ARMv7 -else ifeq ($(DPDK_MARCH),armv8a) -DPDK_MACHINE:=armv8a # ARMv8 -else -$(error Unknown DPDK_MARCH) -endif - # assemble DPDK make arguments DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ T=$(DPDK_TARGET) \ @@ -85,11 +84,10 @@ DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ EXTRA_CFLAGS="$(DPDK_EXTRA_CFLAGS)" \ EXTRA_LDFLAGS="$(DPDK_EXTRA_LDFLAGS)" \ CPU_CFLAGS="$(DPDK_CPU_CFLAGS)" \ - CPU_LDFLAGS="$(DPDK_CPU_LDFLAGS)" \ DESTDIR=$(I) \ $(DPDK_MAKE_EXTRA_ARGS) -DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") +DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") define set @if grep -q CONFIG_$1 $@ ; \ -- cgit 1.2.3-korg From 4983e47c37410af5c9a4a3a5bcead0f7dad1a925 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Thu, 16 Feb 2017 20:28:35 +0100 Subject: dpdk: bump to DPDK 17.02 Change-Id: I4563208d97c43a200fcee948db491706a8d3e211 Signed-off-by: Damjan Marion --- dpdk/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index c9ed8730..bdd7993a 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -24,13 +24,14 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 16.11 -PKG_SUFFIX ?= vpp5 +DPDK_VERSION ?= 17.02 +PKG_SUFFIX ?= vpp1 DPDK_BASE_URL 
?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 +DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifneq (,$(findstring clang,$(CC))) -- cgit 1.2.3-korg From 6ca42d333b247eaee4995a7f779a43759a81909e Mon Sep 17 00:00:00 2001 From: Radu Nicolau Date: Thu, 16 Feb 2017 13:54:42 +0000 Subject: dpdk: updated build to automatically download Intel(R) Multi-Buffer Crypto for IPsec Library Change-Id: I58182edb7b0d314bb6dfa1daf7b00012196fd3e1 Signed-off-by: Radu Nicolau --- Makefile | 2 +- dpdk/Makefile | 22 ++++++++++++++++++++++ src/Makefile.am | 2 +- 3 files changed, 24 insertions(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index bbbb2acf..8122c6a4 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ endif DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache DEB_DEPENDS += debhelper dkms git libtool libganglia1-dev libapr1-dev dh-systemd DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config -DEB_DEPENDS += python-dev python-virtualenv python-pip lcov chrpath autoconf +DEB_DEPENDS += python-dev python-virtualenv python-pip lcov chrpath autoconf nasm ifeq ($(OS_VERSION_ID),14.04) DEB_DEPENDS += openjdk-8-jdk-headless else diff --git a/dpdk/Makefile b/dpdk/Makefile index bdd7993a..f8f1ca13 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -33,6 +33,12 @@ DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) +ifeq ($(DPDK_CRYPTO_SW_PMD),y) +AESNIMB_LIB_TARBALL := v0.44.tar.gz +AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) +AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44 +export AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) +endif ifneq (,$(findstring clang,$(CC))) DPDK_CC=clang @@ -144,6 +150,12 @@ $(CURDIR)/$(DPDK_TARBALL): then cp $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) $(CURDIR) ; \ else curl -o $(CURDIR)/$(DPDK_TARBALL) -LO $(DPDK_TAR_URL) ; \ fi +ifeq ($(DPDK_CRYPTO_SW_PMD),y) + @if [ -e $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) ] ; \ + then cp $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) $(CURDIR) ; \ + else curl -o $(CURDIR)/$(AESNIMB_LIB_TARBALL) -LO $(AESNIMB_LIB_TARBALL_URL) ; \ + fi +endif @rm -f $(B)/.download.ok $(B)/.download.ok: $(CURDIR)/$(DPDK_TARBALL) @@ -160,6 +172,10 @@ download: $(B)/.download.ok $(B)/.extract.ok: $(B)/.download.ok @echo --- extracting $(DPDK_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) +ifeq ($(DPDK_CRYPTO_SW_PMD),y) + @echo --- extracting $(AESNIMB_LIB_TARBALL) --- + @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL) +endif @touch $@ .PHONY: extract @@ -179,6 +195,9 @@ endif patch: $(B)/.patch.ok $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config +ifeq ($(DPDK_CRYPTO_SW_PMD),y) + @make -C $(AESNIMB_LIB_SOURCE) +endif @make $(DPDK_MAKE_ARGS) config @touch $@ @@ -188,6 +207,9 @@ config: $(B)/.config.ok $(B)/.build.ok: $(DPDK_SOURCE_FILES) @if [ ! 
-e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi @make $(DPDK_MAKE_ARGS) install +ifeq ($(DPDK_CRYPTO_SW_PMD),y) + @cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ +endif @touch $@ .PHONY: build diff --git a/src/Makefile.am b/src/Makefile.am index 7da86fcb..a0e62dbd 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -49,7 +49,7 @@ else DPDK_LD_FLAGS = -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive,-lm,-ldl endif if WITH_DPDK_CRYPTO_SW -DPDK_LD_ADD = -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB +DPDK_LD_ADD = -lIPSec_MB endif if WITH_DPDK_MLX5_PMD DPDK_LD_FLAGS += -libverbs -lmlx5 -lnuma -- cgit 1.2.3-korg From 2e3677bb2085d4992f74156bdff8fe050ac9de24 Mon Sep 17 00:00:00 2001 From: Radu Nicolau Date: Mon, 20 Feb 2017 12:27:02 +0000 Subject: cryptodev: Automatically download and build ISA-L Crypto library Change-Id: I5454053461e6fb98e7f58f9562efde3590bb7cb5 Signed-off-by: Radu Nicolau --- dpdk/Makefile | 21 ++++++++++++++++++++- src/Makefile.am | 2 +- 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index f8f1ca13..f8c85c96 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -33,11 +33,14 @@ DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) + ifeq ($(DPDK_CRYPTO_SW_PMD),y) AESNIMB_LIB_TARBALL := v0.44.tar.gz AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44 -export AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) +ISA_L_CRYPTO_LIB_TARBALL := isa_l_crypto.tar.gz +ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/master.tar.gz +ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-master endif ifneq (,$(findstring clang,$(CC))) @@ -84,6 +87,12 @@ else DPDK_EXTRA_CFLAGS := -g -O0 endif +ifeq ($(DPDK_CRYPTO_SW_PMD),y) +DPDK_EXTRA_CFLAGS += -I$(ISA_L_CRYPTO_LIB_SOURCE) +DPDK_EXTRA_LDFLAGS += -L$(ISA_L_CRYPTO_LIB_SOURCE)/.libs +DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) +endif + # assemble DPDK make arguments DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ T=$(DPDK_TARGET) \ @@ -155,6 +164,10 @@ ifeq ($(DPDK_CRYPTO_SW_PMD),y) then cp $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) $(CURDIR) ; \ else curl -o $(CURDIR)/$(AESNIMB_LIB_TARBALL) -LO $(AESNIMB_LIB_TARBALL_URL) ; \ fi + @if [ -e $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) ] ; \ + then cp $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) $(CURDIR) ; \ + else curl -o $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) -LO $(ISA_L_CRYPTO_LIB_TARBALL_URL) ; \ + fi endif @rm -f $(B)/.download.ok @@ -175,6 +188,8 @@ $(B)/.extract.ok: $(B)/.download.ok ifeq ($(DPDK_CRYPTO_SW_PMD),y) @echo --- extracting $(AESNIMB_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL) + @echo --- extracting $(ISA_L_CRYPTO_LIB_TARBALL) --- + @tar --directory $(B) --extract --file $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) endif @touch $@ @@ -197,6 +212,9 @@ patch: $(B)/.patch.ok $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config ifeq ($(DPDK_CRYPTO_SW_PMD),y) @make -C $(AESNIMB_LIB_SOURCE) + @cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure + @make -C $(ISA_L_CRYPTO_LIB_SOURCE) + @cp $(ISA_L_CRYPTO_LIB_SOURCE)/include $(ISA_L_CRYPTO_LIB_SOURCE)/isa-l_crypto -r endif @make $(DPDK_MAKE_ARGS) config @touch 
$@ @@ -209,6 +227,7 @@ $(B)/.build.ok: $(DPDK_SOURCE_FILES) @make $(DPDK_MAKE_ARGS) install ifeq ($(DPDK_CRYPTO_SW_PMD),y) @cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ + @cp $(ISA_L_CRYPTO_LIB_SOURCE)/.libs/libisal_crypto.a $(I)/lib/ endif @touch $@ diff --git a/src/Makefile.am b/src/Makefile.am index a0e62dbd..08feb29a 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -49,7 +49,7 @@ else DPDK_LD_FLAGS = -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive,-lm,-ldl endif if WITH_DPDK_CRYPTO_SW -DPDK_LD_ADD = -lIPSec_MB +DPDK_LD_ADD = -lIPSec_MB -lisal_crypto endif if WITH_DPDK_MLX5_PMD DPDK_LD_FLAGS += -libverbs -lmlx5 -lnuma -- cgit 1.2.3-korg From f7c379403a98cf060d28bac24916c51067c4ec90 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Tue, 28 Feb 2017 23:26:30 +0100 Subject: dpdk: retire support for dpdk 16.07 Change-Id: I8585552c026415340fe9fd0458cb8450da3c4ae2 Signed-off-by: Damjan Marion --- dpdk/Makefile | 1 - ...Add-packet_type-metadata-in-the-i40e-vPMD.patch | 1210 - ...0e-Enable-bad-checksum-flags-in-i40e-vPMD.patch | 111 - ...T_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch | 42 - ...vert-ixgbe-fix-packet-type-from-vector-Rx.patch | 133 - ...DPAA2-Poll-Mode-Driver-Support-dpdk-16.07.patch | 40106 ------------------- ...ers-reset-packet_type-before-using-buffer.patch | 70 - ...low-applications-to-override-rte_delay_us.patch | 43 - ...rash-in-igb_uio-driver-when-the-device-is.patch | 38 - ...mporarily-disable-unthrottled-log-message.patch | 26 - ...bad-L4-checksum-ptype-set-on-ICMP-packets.patch | 18 - ...irtio-enable-indirect-descriptors-feature.patch | 34 - src/vnet/devices/dpdk/dpdk.h | 20 - src/vnet/devices/dpdk/format.c | 12 - src/vnet/devices/dpdk/init.c | 10 +- src/vnet/devices/dpdk/main.c | 7 +- src/vnet/devices/dpdk/node.c | 12 - 17 files changed, 3 insertions(+), 41890 deletions(-) delete mode 100644 dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch delete mode 100644 dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch delete mode 100644 dpdk/dpdk-16.07_patches/0003-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch delete mode 100644 dpdk/dpdk-16.07_patches/0004-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch delete mode 100644 dpdk/dpdk-16.07_patches/0005-NXP-DPAA2-Poll-Mode-Driver-Support-dpdk-16.07.patch delete mode 100644 dpdk/dpdk-16.07_patches/0005-drivers-reset-packet_type-before-using-buffer.patch delete mode 100644 dpdk/dpdk-16.07_patches/0006-Allow-applications-to-override-rte_delay_us.patch delete mode 100644 dpdk/dpdk-16.07_patches/0007-UIO-Fix-a-crash-in-igb_uio-driver-when-the-device-is.patch delete mode 100644 dpdk/dpdk-16.07_patches/0008-Temporarily-disable-unthrottled-log-message.patch delete mode 100644 dpdk/dpdk-16.07_patches/0009-enic-bad-L4-checksum-ptype-set-on-ICMP-packets.patch delete mode 100644 dpdk/dpdk-16.07_patches/0010-virtio-enable-indirect-descriptors-feature.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index f8c85c96..8e187cc2 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -29,7 +29,6 @@ PKG_SUFFIX ?= vpp1 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) diff --git 
a/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch deleted file mode 100644 index 74d9416e..00000000 --- a/dpdk/dpdk-16.07_patches/0001-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch +++ /dev/null @@ -1,1210 +0,0 @@ -From 79a2ddaf4d7df7172faa54716ae7647ad7a549b9 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:01 -0700 -Subject: [PATCH 1/2] i40e: Add packet_type metadata in the i40e vPMD - -The ptype is decoded from the rx descriptor and stored -in the packet type field in the mbuf using the same function -as the non-vector driver. - -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx.c | 566 +-------------------------------------- - drivers/net/i40e/i40e_rxtx.h | 563 ++++++++++++++++++++++++++++++++++++++ - drivers/net/i40e/i40e_rxtx_vec.c | 16 ++ - 3 files changed, 581 insertions(+), 564 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c -index d3cfb98..2903347 100644 ---- a/drivers/net/i40e/i40e_rxtx.c -+++ b/drivers/net/i40e/i40e_rxtx.c -@@ -174,569 +174,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) - } - #endif - --/* For each value it means, datasheet of hardware can tell more details -- * -- * @note: fix i40e_dev_supported_ptypes_get() if any change here. -- */ --static inline uint32_t --i40e_rxd_pkt_type_mapping(uint8_t ptype) --{ -- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -- /* L2 types */ -- /* [0] reserved */ -- [1] = RTE_PTYPE_L2_ETHER, -- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -- /* [3] - [5] reserved */ -- [6] = RTE_PTYPE_L2_ETHER_LLDP, -- /* [7] - [10] reserved */ -- [11] = RTE_PTYPE_L2_ETHER_ARP, -- /* [12] - [21] reserved */ -- -- /* Non tunneled IPv4 */ -- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [25] reserved */ -- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv4 --> IPv4 */ -- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [32] reserved */ -- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> IPv6 */ -- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 
-- RTE_PTYPE_INNER_L4_FRAG, -- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [39] reserved */ -- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN */ -- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [47] reserved */ -- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [54] reserved */ -- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [62] reserved */ -- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [69] reserved */ -- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [77] reserved */ -- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv4 --> GRE/Teredo/VXLAN --> 
MAC/VLAN --> IPv6 */ -- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [84] reserved */ -- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* Non tunneled IPv6 */ -- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- /* [91] reserved */ -- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* IPv6 --> IPv4 */ -- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [98] reserved */ -- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> IPv6 */ -- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [105] reserved */ -- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_IP | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN */ -- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [113] reserved */ -- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ -- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [120] reserved */ -- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [128] reserved */ -- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [135] reserved */ -- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [143] reserved */ -- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_FRAG, -- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_NONFRAG, -- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_UDP, -- /* [150] reserved */ -- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_TCP, -- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_SCTP, -- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_TUNNEL_GRENAT | -- RTE_PTYPE_INNER_L2_ETHER_VLAN | -- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_INNER_L4_ICMP, -- -- /* L2 NSH packet type */ -- [154] = RTE_PTYPE_L2_ETHER_NSH, -- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_FRAG, -- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_NONFRAG, -- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_UDP, -- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_TCP, -- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_SCTP, -- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -- RTE_PTYPE_L4_ICMP, -- -- /* All others reserved */ -- }; -- -- return type_table[ptype]; --} -- - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 -@@ -2136,7 +1573,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) - #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC - dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || - #endif -- dev->rx_pkt_burst == i40e_recv_scattered_pkts) -+ dev->rx_pkt_burst == i40e_recv_scattered_pkts || -+ dev->rx_pkt_burst == i40e_recv_pkts_vec) - return ptypes; - return NULL; - } -diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h -index 98179f0..ecdb13c 100644 ---- a/drivers/net/i40e/i40e_rxtx.h -+++ b/drivers/net/i40e/i40e_rxtx.h -@@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, - struct i40e_tx_queue *txq); - void i40e_set_tx_function(struct rte_eth_dev *dev); - -+/* For each value it means, datasheet of hardware can tell more details -+ * -+ * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
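Registering i40e_recv_pkts_vec in i40e_dev_supported_ptypes_get(), as the hunk above does, matters because applications discover packet-type support through rte_eth_dev_get_supported_ptypes() before trusting mbuf->packet_type. A minimal consumer-side sketch, assuming the rte_ethdev API as shipped in DPDK 16.07; the port id and array size are arbitrary example values, and the helper names are illustrative, not part of the patch:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* List the L3 packet types the active Rx burst function can report. */
static void
print_l3_ptypes(uint8_t port_id)
{
        uint32_t ptypes[32];
        int i, num;

        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                                ptypes, 32);
        for (i = 0; i < num && i < 32; i++)
                printf("port %d: L3 ptype 0x%08x\n", port_id, ptypes[i]);
}

/* Branch on the value the mapping table below places in mbuf->packet_type. */
static inline int
pkt_is_plain_ipv4(const struct rte_mbuf *m)
{
        return (m->packet_type & RTE_PTYPE_L3_MASK) ==
               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
}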
-+ */ -+static inline uint32_t -+i40e_rxd_pkt_type_mapping(uint8_t ptype) -+{ -+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { -+ /* L2 types */ -+ /* [0] reserved */ -+ [1] = RTE_PTYPE_L2_ETHER, -+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, -+ /* [3] - [5] reserved */ -+ [6] = RTE_PTYPE_L2_ETHER_LLDP, -+ /* [7] - [10] reserved */ -+ [11] = RTE_PTYPE_L2_ETHER_ARP, -+ /* [12] - [21] reserved */ -+ -+ /* Non tunneled IPv4 */ -+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [25] reserved */ -+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv4 --> IPv4 */ -+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [32] reserved */ -+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> IPv6 */ -+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [39] reserved */ -+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN */ -+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT 
| -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [47] reserved */ -+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ -+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [54] reserved */ -+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ -+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [62] reserved */ -+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [69] reserved */ -+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [77] reserved */ -+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [84] reserved */ -+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* Non tunneled IPv6 */ -+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ 
[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ /* [91] reserved */ -+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* IPv6 --> IPv4 */ -+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [98] reserved */ -+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> IPv6 */ -+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [105] reserved */ -+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_IP | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN */ -+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ -+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [113] reserved */ -+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> 
GRE/Teredo/VXLAN --> IPv6 */ -+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [120] reserved */ -+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ -+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ -+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [128] reserved */ -+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ -+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [135] reserved */ -+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | -+ 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ -+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ -+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [143] reserved */ -+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ -+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_FRAG, -+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_NONFRAG, -+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_UDP, -+ /* [150] reserved */ -+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_TCP, -+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_SCTP, -+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_TUNNEL_GRENAT | -+ RTE_PTYPE_INNER_L2_ETHER_VLAN | -+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_INNER_L4_ICMP, -+ -+ /* L2 NSH packet type */ -+ [154] = RTE_PTYPE_L2_ETHER_NSH, -+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_NONFRAG, -+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_FRAG, -+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ 
RTE_PTYPE_L4_NONFRAG, -+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_UDP, -+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_TCP, -+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_SCTP, -+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | -+ RTE_PTYPE_L4_ICMP, -+ -+ /* All others reserved */ -+ }; -+ -+ return type_table[ptype]; -+} -+ - #endif /* _I40E_RXTX_H_ */ -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index 05cb415..e78ac63 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -187,6 +187,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - - #define PKTLEN_SHIFT 10 - -+static inline void -+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) -+{ -+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); -+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); -+ -+ ptype0 = _mm_srli_epi64(ptype0, 30); -+ ptype1 = _mm_srli_epi64(ptype1, 30); -+ -+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0)); -+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8)); -+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0)); -+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8)); -+} -+ - /* - * Notice: - * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -@@ -393,6 +408,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, - pkt_mb2); - _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, - pkt_mb1); -+ desc_to_ptype_v(descs, &rx_pkts[pos]); - /* C.4 calc avaialbe number of desc */ - var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); - nb_pkts_recd += var; --- -2.7.4 - diff --git a/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch deleted file mode 100644 index 58256f19..00000000 --- a/dpdk/dpdk-16.07_patches/0002-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 5917bd1cf9857979a7cae89f362d2c885f09d034 Mon Sep 17 00:00:00 2001 -From: Damjan Marion -Date: Thu, 14 Jul 2016 09:59:02 -0700 -Subject: [PATCH 2/2] i40e: Enable bad checksum flags in i40e vPMD - -Decode the checksum flags from the rx descriptor, setting -the appropriate bit in the mbuf ol_flags field when the flag -indicates a bad checksum. - -Signed-off-by: Damjan Marion -Signed-off-by: Jeff Shaw ---- - drivers/net/i40e/i40e_rxtx_vec.c | 48 +++++++++++++++++++++++----------------- - 1 file changed, 28 insertions(+), 20 deletions(-) - -diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c -index e78ac63..ace51df 100644 ---- a/drivers/net/i40e/i40e_rxtx_vec.c -+++ b/drivers/net/i40e/i40e_rxtx_vec.c -@@ -138,19 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) - static inline void - desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - { -- __m128i vlan0, vlan1, rss; -- union { -- uint16_t e[4]; -- uint64_t dword; -- } vol; -+ __m128i vlan0, vlan1, rss, l3_l4e; - - /* mask everything except RSS, flow director and VLAN flags - * bit2 is for VLAN tag, bit11 for flow director indication - * bit13:12 for RSS indication. 
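The hunk below widens the descriptor-status lanes of this decode from 16 to 32 bits and adds an l3_l4e shuffle table, so the vector path can also report the IP, L4 and outer-IP checksum error bits (bits 24:22 of the same status/error word). As a plain-C reference for what those byte-shuffle lookup tables encode, here is a scalar sketch; the bit positions are taken from the comment above and the masks and shifts in the hunk that follows, and the helper name is illustrative rather than part of the driver:

#include <stdint.h>
#include <rte_mbuf.h>

/* Scalar equivalent of the shuffle-table decode: "status" is the 32-bit
 * status/error field of one Rx descriptor. */
static inline uint64_t
rx_status_to_olflags(uint32_t status)
{
        uint64_t flags = 0;
        uint32_t l3_l4e = (status >> 22) & 0x7;  /* IP/L4/outer-IP error bits */

        if (status & (1u << 2))                  /* VLAN tag reported (bit 2) */
                flags |= PKT_RX_VLAN_PKT;
        if (status & (1u << 11))                 /* flow director indication */
                flags |= PKT_RX_FDIR;
        if (((status >> 12) & 0x3) == 0x3)       /* RSS indication (bits 13:12) */
                flags |= PKT_RX_RSS_HASH;
        if (l3_l4e & 0x1)
                flags |= PKT_RX_IP_CKSUM_BAD;
        if (l3_l4e & 0x2)
                flags |= PKT_RX_L4_CKSUM_BAD;
        if (l3_l4e & 0x4)
                flags |= PKT_RX_EIP_CKSUM_BAD;
        return flags;
}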
- */ -- const __m128i rss_vlan_msk = _mm_set_epi16( -- 0x0000, 0x0000, 0x0000, 0x0000, -- 0x3804, 0x3804, 0x3804, 0x3804); -+ const __m128i rss_vlan_msk = _mm_set_epi32( -+ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004); - - /* map rss and vlan type to rss hash and vlan flag */ - const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, -@@ -163,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) - PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, - 0, 0, PKT_RX_FDIR, 0); - -- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); -- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); -- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); -+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_IP_CKSUM_BAD, -+ 0); -+ -+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); -+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); -+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); - - vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); - vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); - -- rss = _mm_srli_epi16(vlan1, 11); -+ rss = _mm_srli_epi32(vlan1, 12); - rss = _mm_shuffle_epi8(rss_flags, rss); - -+ l3_l4e = _mm_srli_epi32(vlan1, 22); -+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); -+ - vlan0 = _mm_or_si128(vlan0, rss); -- vol.dword = _mm_cvtsi128_si64(vlan0); -+ vlan0 = _mm_or_si128(vlan0, l3_l4e); - -- rx_pkts[0]->ol_flags = vol.e[0]; -- rx_pkts[1]->ol_flags = vol.e[1]; -- rx_pkts[2]->ol_flags = vol.e[2]; -- rx_pkts[3]->ol_flags = vol.e[3]; -+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0); -+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2); -+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4); -+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6); - } - #else - #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) -@@ -754,7 +762,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE - /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_strip != 0 || -- rxmode->hw_vlan_extend != 0) -+ rxmode->hw_vlan_extend != 0 || -+ rxmode->hw_ip_checksum != 0) - return -1; - #endif - -@@ -765,8 +774,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) - /* - no csum error report support - * - no header split support - */ -- if (rxmode->hw_ip_checksum == 1 || -- rxmode->header_split == 1) -+ if (rxmode->header_split == 1) - return -1; - - return 0; --- -2.7.4 - diff --git a/dpdk/dpdk-16.07_patches/0003-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch b/dpdk/dpdk-16.07_patches/0003-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch deleted file mode 100644 index 53264158..00000000 --- a/dpdk/dpdk-16.07_patches/0003-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 6a7a9e52ed2ccfa86c2def3a66a368a5577f2fc2 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 3 May 2016 13:56:05 -0700 -Subject: [PATCH] enic: Set PKT_RX_VLAN_PKT iff returned packet has VLAN tag - -Only set the ol_flags PKT_RX_VLAN_PKT bit if the packet being passed -to the application contains a VLAN tag. This is true whether -stripping is enabled or disabled. - -This area of the API is in flux, so behaviour may change in the -future. 
- -Signed-off-by: John Daley ---- - drivers/net/enic/enic_rxtx.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 02b54dd..6a95389 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -195,12 +195,16 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) - if (unlikely(!enic_cq_rx_desc_eop(ciflags))) - goto mbuf_flags_done; - -- /* VLAN stripping */ -+ /* VLAN stripping. Set PKT_RX_VLAN_PKT only if there is a vlan tag -+ * in the packet passed up -+ */ - if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { -- pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; -+ pkt_flags |= PKT_RX_VLAN_STRIPPED; - mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); - } else { - mbuf->vlan_tci = 0; -+ if (enic_cq_rx_desc_vlan(cqrd)) -+ pkt_flags |= PKT_RX_VLAN_PKT; - } - - /* RSS flag */ --- - diff --git a/dpdk/dpdk-16.07_patches/0004-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch b/dpdk/dpdk-16.07_patches/0004-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch deleted file mode 100644 index 1c4585f1..00000000 --- a/dpdk/dpdk-16.07_patches/0004-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch +++ /dev/null @@ -1,133 +0,0 @@ -From 44b3a182e791c2f023d2a237a03eb9d3014c7da6 Mon Sep 17 00:00:00 2001 -From: Ray Kinsella -Date: Thu, 4 Aug 2016 17:06:21 +0100 -Subject: [PATCH] Revert "ixgbe: fix packet type from vector Rx" - -This reverts commit d9a2009a81089093645fea2e04b51dd37edf3e6f. - -Conflicts: - drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c - -Signed-off-by: Ray Kinsella ---- - drivers/net/ixgbe/ixgbe_ethdev.c | 4 +++- - drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 34 +++++++++++++++++++++++----------- - 2 files changed, 26 insertions(+), 12 deletions(-) - -diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c -index d478a15..63966a6 100644 ---- a/drivers/net/ixgbe/ixgbe_ethdev.c -+++ b/drivers/net/ixgbe/ixgbe_ethdev.c -@@ -3117,7 +3117,9 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) - if (dev->rx_pkt_burst == ixgbe_recv_pkts || - dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || - dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || -- dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) -+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc || -+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec || -+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) - return ptypes; - return NULL; - } -diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c -index 1c4fd7c..3aae401 100644 ---- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c -+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c -@@ -231,6 +231,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - ); - __m128i dd_check, eop_check; - uint8_t vlan_flags; -+ __m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, -+ 0xFFFFFFFF, 0xFFFF07F0); - - /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ - nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); -@@ -271,8 +273,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - 13, 12, /* octet 12~13, 16 bits data_len */ - 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ - 13, 12, /* octet 12~13, low 16 bits pkt_len */ -- 0xFF, 0xFF, /* skip 32 bit pkt_type */ -- 0xFF, 0xFF -+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */ -+ 1, /* octet 1, 8 bits pkt_type field */ -+ 0 /* octet 0, 4 bits offset 4 pkt_type field */ - ); - - /* Cache is empty -> need to scan the buffer 
rings, but first move -@@ -294,6 +297,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; - pos += RTE_IXGBE_DESCS_PER_LOOP, - rxdp += RTE_IXGBE_DESCS_PER_LOOP) { -+ __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP]; - __m128i descs[RTE_IXGBE_DESCS_PER_LOOP]; - __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; - __m128i zero, staterr, sterr_tmp1, sterr_tmp2; -@@ -304,7 +308,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - - /* Read desc statuses backwards to avoid race condition */ - /* A.1 load 4 pkts desc */ -- descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); -+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); - - /* B.2 copy 2 mbuf point into rx_pkts */ - _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); -@@ -312,10 +316,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - /* B.1 load 1 mbuf point */ - mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); - -- descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); -+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); - /* B.1 load 2 mbuf point */ -- descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); -- descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); -+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); -+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp)); - - /* B.2 copy 2 mbuf point into rx_pkts */ - _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); -@@ -327,6 +331,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); - } - -+ /* A* mask out 0~3 bits RSS type */ -+ descs[3] = _mm_and_si128(descs0[3], desc_mask); -+ descs[2] = _mm_and_si128(descs0[2], desc_mask); -+ -+ /* A* mask out 0~3 bits RSS type */ -+ descs[1] = _mm_and_si128(descs0[1], desc_mask); -+ descs[0] = _mm_and_si128(descs0[0], desc_mask); -+ - /* avoid compiler reorder optimization */ - rte_compiler_barrier(); - -@@ -334,22 +346,22 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, - pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); - pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); - -- /* D.1 pkt 1,2 convert format from desc to pktmbuf */ -- pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); -- pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); -- - /* C.1 4=>2 filter staterr info only */ - sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); - /* C.1 4=>2 filter staterr info only */ - sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); - - /* set ol_flags with vlan packet type */ -- desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]); -+ desc_to_olflags_v(descs0, vlan_flags, &rx_pkts[pos]); - - /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ - pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); - pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); - -+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */ -+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); -+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); -+ - /* C.2 get 4 pkts staterr value */ - zero = _mm_xor_si128(dd_check, dd_check); - staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); --- -1.9.1 - diff --git a/dpdk/dpdk-16.07_patches/0005-NXP-DPAA2-Poll-Mode-Driver-Support-dpdk-16.07.patch b/dpdk/dpdk-16.07_patches/0005-NXP-DPAA2-Poll-Mode-Driver-Support-dpdk-16.07.patch deleted file mode 100644 index 9bd3b2a5..00000000 --- a/dpdk/dpdk-16.07_patches/0005-NXP-DPAA2-Poll-Mode-Driver-Support-dpdk-16.07.patch +++ /dev/null @@ -1,40106 +0,0 @@ -From 5a2069b38e85771f3857af390e407360d66cd6ed Mon Sep 17 00:00:00 2001 
-From: Sachin Saxena -Date: Fri, 5 Aug 2016 14:06:11 +0530 -Subject: [PATCH 5/5] NXP DPAA2 Poll Mode Driver Support (dpdk-16.07) - - Upstreaming of DPAA2 driver changes is in progress.This patch will - temporary add the support in VPP in-built DPDK. - - Two types of changes: - 1. Driver specfic independent files. No impact on any other functionality. - 2. Changes in common EAL framework. These changes are done in compile time DPAA2 - specific flag, so no impact is expected on other existing features if not - compiling for DPAA2. - -Signed-off-by: Sachin Saxena ---- - config/defconfig_arm64-dpaa2-linuxapp-gcc | 18 +- - drivers/net/Makefile | 1 + - drivers/net/dpaa2/Makefile | 102 + - drivers/net/dpaa2/dpaa2_logs.h | 78 + - drivers/net/dpaa2/mc/dpaiop.c | 457 ++++ - drivers/net/dpaa2/mc/dpbp.c | 432 ++++ - drivers/net/dpaa2/mc/dpci.c | 501 ++++ - drivers/net/dpaa2/mc/dpcon.c | 400 +++ - drivers/net/dpaa2/mc/dpdbg.c | 547 +++++ - drivers/net/dpaa2/mc/dpdcei.c | 449 ++++ - drivers/net/dpaa2/mc/dpdmai.c | 452 ++++ - drivers/net/dpaa2/mc/dpdmux.c | 567 +++++ - drivers/net/dpaa2/mc/dpio.c | 468 ++++ - drivers/net/dpaa2/mc/dpmac.c | 422 ++++ - drivers/net/dpaa2/mc/dpmcp.c | 312 +++ - drivers/net/dpaa2/mc/dpmng.c | 58 + - drivers/net/dpaa2/mc/dpni.c | 1907 +++++++++++++++ - drivers/net/dpaa2/mc/dprc.c | 786 ++++++ - drivers/net/dpaa2/mc/dprtc.c | 509 ++++ - drivers/net/dpaa2/mc/dpseci.c | 502 ++++ - drivers/net/dpaa2/mc/dpsw.c | 1639 +++++++++++++ - drivers/net/dpaa2/mc/fsl_dpaiop.h | 494 ++++ - drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h | 190 ++ - drivers/net/dpaa2/mc/fsl_dpbp.h | 438 ++++ - drivers/net/dpaa2/mc/fsl_dpbp_cmd.h | 172 ++ - drivers/net/dpaa2/mc/fsl_dpci.h | 594 +++++ - drivers/net/dpaa2/mc/fsl_dpci_cmd.h | 200 ++ - drivers/net/dpaa2/mc/fsl_dpcon.h | 407 +++ - drivers/net/dpaa2/mc/fsl_dpcon_cmd.h | 162 ++ - drivers/net/dpaa2/mc/fsl_dpdbg.h | 635 +++++ - drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h | 249 ++ - drivers/net/dpaa2/mc/fsl_dpdcei.h | 515 ++++ - drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h | 182 ++ - drivers/net/dpaa2/mc/fsl_dpdmai.h | 521 ++++ - drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h | 191 ++ - drivers/net/dpaa2/mc/fsl_dpdmux.h | 724 ++++++ - drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 256 ++ - drivers/net/dpaa2/mc/fsl_dpio.h | 460 ++++ - drivers/net/dpaa2/mc/fsl_dpio_cmd.h | 184 ++ - drivers/net/dpaa2/mc/fsl_dpkg.h | 174 ++ - drivers/net/dpaa2/mc/fsl_dpmac.h | 593 +++++ - drivers/net/dpaa2/mc/fsl_dpmac_cmd.h | 195 ++ - drivers/net/dpaa2/mc/fsl_dpmcp.h | 332 +++ - drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h | 135 + - drivers/net/dpaa2/mc/fsl_dpmng.h | 74 + - drivers/net/dpaa2/mc/fsl_dpmng_cmd.h | 46 + - drivers/net/dpaa2/mc/fsl_dpni.h | 2581 ++++++++++++++++++++ - drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1058 ++++++++ - drivers/net/dpaa2/mc/fsl_dprc.h | 1032 ++++++++ - drivers/net/dpaa2/mc/fsl_dprc_cmd.h | 755 ++++++ - drivers/net/dpaa2/mc/fsl_dprtc.h | 434 ++++ - drivers/net/dpaa2/mc/fsl_dprtc_cmd.h | 181 ++ - drivers/net/dpaa2/mc/fsl_dpseci.h | 647 +++++ - drivers/net/dpaa2/mc/fsl_dpseci_cmd.h | 241 ++ - drivers/net/dpaa2/mc/fsl_dpsw.h | 2164 ++++++++++++++++ - drivers/net/dpaa2/mc/fsl_dpsw_cmd.h | 916 +++++++ - drivers/net/dpaa2/mc/fsl_mc_cmd.h | 221 ++ - drivers/net/dpaa2/mc/fsl_mc_sys.h | 98 + - drivers/net/dpaa2/mc/fsl_net.h | 480 ++++ - drivers/net/dpaa2/mc/mc_sys.c | 127 + - drivers/net/dpaa2/qbman/driver/qbman_debug.c | 929 +++++++ - drivers/net/dpaa2/qbman/driver/qbman_debug.h | 140 ++ - drivers/net/dpaa2/qbman/driver/qbman_portal.c | 1441 +++++++++++ - drivers/net/dpaa2/qbman/driver/qbman_portal.h 
| 270 ++ - drivers/net/dpaa2/qbman/driver/qbman_private.h | 168 ++ - drivers/net/dpaa2/qbman/driver/qbman_sys.h | 373 +++ - drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h | 69 + - drivers/net/dpaa2/qbman/include/compat.h | 637 +++++ - .../dpaa2/qbman/include/drivers/fsl_qbman_base.h | 151 ++ - .../dpaa2/qbman/include/drivers/fsl_qbman_portal.h | 1087 +++++++++ - drivers/net/dpaa2/rte_eth_dpaa2_pvt.h | 330 +++ - drivers/net/dpaa2/rte_eth_dpbp.c | 377 +++ - drivers/net/dpaa2/rte_eth_dpio.c | 336 +++ - drivers/net/dpaa2/rte_eth_dpni.c | 2269 +++++++++++++++++ - drivers/net/dpaa2/rte_eth_dpni_annot.h | 310 +++ - lib/librte_eal/common/eal_private.h | 7 + - lib/librte_eal/linuxapp/eal/Makefile | 4 + - lib/librte_eal/linuxapp/eal/eal.c | 5 + - lib/librte_eal/linuxapp/eal/eal_soc.c | 67 + - lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c | 650 +++++ - lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h | 98 + - lib/librte_mempool/rte_mempool.h | 8 + - mk/rte.app.mk | 1 + - 83 files changed, 39391 insertions(+), 1 deletion(-) - create mode 100644 drivers/net/dpaa2/Makefile - create mode 100644 drivers/net/dpaa2/dpaa2_logs.h - create mode 100644 drivers/net/dpaa2/mc/dpaiop.c - create mode 100644 drivers/net/dpaa2/mc/dpbp.c - create mode 100644 drivers/net/dpaa2/mc/dpci.c - create mode 100644 drivers/net/dpaa2/mc/dpcon.c - create mode 100644 drivers/net/dpaa2/mc/dpdbg.c - create mode 100644 drivers/net/dpaa2/mc/dpdcei.c - create mode 100644 drivers/net/dpaa2/mc/dpdmai.c - create mode 100644 drivers/net/dpaa2/mc/dpdmux.c - create mode 100644 drivers/net/dpaa2/mc/dpio.c - create mode 100644 drivers/net/dpaa2/mc/dpmac.c - create mode 100644 drivers/net/dpaa2/mc/dpmcp.c - create mode 100644 drivers/net/dpaa2/mc/dpmng.c - create mode 100644 drivers/net/dpaa2/mc/dpni.c - create mode 100644 drivers/net/dpaa2/mc/dprc.c - create mode 100644 drivers/net/dpaa2/mc/dprtc.c - create mode 100644 drivers/net/dpaa2/mc/dpseci.c - create mode 100644 drivers/net/dpaa2/mc/dpsw.c - create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpci.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpci_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpio.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpio_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpkg.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpni.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpni_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprc.h - create mode 
100644 drivers/net/dpaa2/mc/fsl_dprc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw.h - create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_mc_cmd.h - create mode 100644 drivers/net/dpaa2/mc/fsl_mc_sys.h - create mode 100644 drivers/net/dpaa2/mc/fsl_net.h - create mode 100644 drivers/net/dpaa2/mc/mc_sys.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.c - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_private.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys.h - create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h - create mode 100644 drivers/net/dpaa2/qbman/include/compat.h - create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h - create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h - create mode 100644 drivers/net/dpaa2/rte_eth_dpaa2_pvt.h - create mode 100644 drivers/net/dpaa2/rte_eth_dpbp.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpio.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpni.c - create mode 100644 drivers/net/dpaa2/rte_eth_dpni_annot.h - create mode 100644 lib/librte_eal/linuxapp/eal/eal_soc.c - create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c - create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h - -diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc -index 66df54c..e42fa90 100644 ---- a/config/defconfig_arm64-dpaa2-linuxapp-gcc -+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc -@@ -1,6 +1,6 @@ - # BSD LICENSE - # --# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
- # - # Redistribution and use in source and binary forms, with or without - # modification, are permitted provided that the following conditions -@@ -40,3 +40,19 @@ CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd" - # - CONFIG_RTE_MAX_LCORE=8 - CONFIG_RTE_MAX_NUMA_NODES=1 -+ -+CONFIG_RTE_PKTMBUF_HEADROOM=256 -+# -+#Kernel KNI component - disable by default to avoid kernel -+#code dependency -+# -+CONFIG_RTE_KNI_KMOD=n -+ -+# Compile software PMD backed by FSL DPAA2 files -+# -+CONFIG_RTE_LIBRTE_DPAA2_PMD=y -+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n -+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n -+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n -+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n -+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2" -diff --git a/drivers/net/Makefile b/drivers/net/Makefile -index bc93230..a71c14a 100644 ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -55,6 +55,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx - DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio - DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3 - DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt -+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2 - - ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y) - DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost -diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile -new file mode 100644 -index 0000000..3cf1782 ---- /dev/null -+++ b/drivers/net/dpaa2/Makefile -@@ -0,0 +1,102 @@ -+# BSD LICENSE -+# -+# Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+# -+# Redistribution and use in source and binary forms, with or without -+# modification, are permitted provided that the following conditions -+# are met: -+# -+# * Redistributions of source code must retain the above copyright -+# notice, this list of conditions and the following disclaimer. -+# * Redistributions in binary form must reproduce the above copyright -+# notice, this list of conditions and the following disclaimer in -+# the documentation and/or other materials provided with the -+# distribution. -+# * Neither the name of Freescale Semiconductor nor the names of its -+# contributors may be used to endorse or promote products derived -+# from this software without specific prior written permission. -+# -+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
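The defconfig changes above are what wire the new PMD into a build: CONFIG_RTE_PKTMBUF_HEADROOM is raised to 256 and CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS selects the "dpaa2" hardware mempool handler. A sketch of the application-side effect, assuming the stock rte_mbuf pool helper from DPDK 16.07; the pool name, mbuf count and cache size are arbitrary example values:

#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* With the defconfig above, this helper sizes each buffer with a 256-byte
 * headroom and, because the default mempool ops name is "dpaa2", backs the
 * pool with the hardware buffer-pool handler instead of the software ring. */
static struct rte_mempool *
dpaa2_example_pool(void)
{
        return rte_pktmbuf_pool_create("rx_pool",
                                       8192,    /* number of mbufs */
                                       256,     /* per-lcore cache */
                                       0,       /* private area size */
                                       RTE_PKTMBUF_HEADROOM + 2048,
                                       rte_socket_id());
}

/* rte_mempool_set_ops_byname(mp, "dpaa2", NULL) is the explicit, per-pool way
 * to request the same handler when the build default is left at the
 * ring-based ops. */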
-+ -+include $(RTE_SDK)/mk/rte.vars.mk -+ -+# -+# library name -+# -+LIB = librte_pmd_dpaa2.a -+ -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -+CFLAGS += -O0 -g -+CFLAGS += "-Wno-error" -+else -+CFLAGS += -O3 -g -+CFLAGS += $(WERROR_FLAGS) -+endif -+CFLAGS +=-Wno-strict-aliasing -+CFLAGS +=-Wno-missing-prototypes -+CFLAGS +=-Wno-missing-declarations -+CFLAGS +=-Wno-unused-function -+ -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers -+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver/ -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include -+CFLAGS += -I$(RTE_SDK)/lib/librte_ether -+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal -+ -+EXPORT_MAP := rte_pmd_dpaa2_version.map -+ -+LIBABIVER := 1 -+# -+# all source are stored in SRCS-y -+# -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \ -+ mc/dprc.c \ -+ mc/dprtc.o \ -+ mc/dpaiop.c \ -+ mc/dpdbg.o \ -+ mc/dpdcei.c \ -+ mc/dpdmai.c \ -+ mc/dpmac.c \ -+ mc/dpmcp.c \ -+ mc/dpbp.c \ -+ mc/dpio.c \ -+ mc/dpni.c \ -+ mc/dpsw.c \ -+ mc/dpci.c \ -+ mc/dpcon.c \ -+ mc/dpseci.c \ -+ mc/dpmng.c \ -+ mc/dpdmux.c \ -+ mc/mc_sys.c -+ -+# -+# all source are stored in SRCS-y -+# -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \ -+ qbman/driver/qbman_portal.c \ -+ qbman/driver/qbman_debug.c -+ -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpni.c -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpio.c -+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpbp.c -+ -+# -+# Export include files -+# -+SYMLINK-y-include += -+ -+# this lib depends upon: -+DEPDIRS-y += lib/librte_eal -+include $(RTE_SDK)/mk/rte.lib.mk -diff --git a/drivers/net/dpaa2/dpaa2_logs.h b/drivers/net/dpaa2/dpaa2_logs.h -new file mode 100644 -index 0000000..534d4b5 ---- /dev/null -+++ b/drivers/net/dpaa2/dpaa2_logs.h -@@ -0,0 +1,78 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef _DPAA2_LOGS_H_ -+#define _DPAA2_LOGS_H_ -+ -+#define PMD_INIT_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT -+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -+#else -+#define PMD_INIT_FUNC_TRACE() do { } while (0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX -+#define PMD_RX_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX -+#define PMD_TX_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE -+#define PMD_TX_FREE_LOG(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -+#else -+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) -+#endif -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -+#define PMD_DRV_LOG_RAW(level, fmt, args...) \ -+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -+#else -+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -+#endif -+ -+#define PMD_DRV_LOG2(level, fmt, args...) do { } while (0) -+ -+#define PMD_DRV_LOG(level, fmt, args...) \ -+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args) -+ -+#endif /* _DPAA2_LOGS_H_ */ -diff --git a/drivers/net/dpaa2/mc/dpaiop.c b/drivers/net/dpaa2/mc/dpaiop.c -new file mode 100644 -index 0000000..7c1ecff ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpaiop.c -@@ -0,0 +1,457 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpaiop_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpaiop_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPAIOP_CMD_OPEN(cmd, dpaiop_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpaiop_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpaiop_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPAIOP_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpaiop_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpaiop_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpaiop_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t 
irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpaiop_load(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_load_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_LOAD, -+ cmd_flags, -+ token); -+ DPAIOP_CMD_LOAD(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_run(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpaiop_run_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RUN, -+ cmd_flags, -+ token); -+ 
DPAIOP_CMD_RUN(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_sl_version *version) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_SL_VERSION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_SL_VERSION(cmd, version); -+ -+ return 0; -+} -+ -+int dpaiop_get_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPAIOP_RSP_GET_STATE(cmd, *state); -+ -+ return 0; -+} -+ -+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time_of_day) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_TIME_OF_DAY, -+ cmd_flags, -+ token); -+ -+ DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day); -+ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time_of_day) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_TIME_OF_DAY, -+ cmd_flags, -+ token); -+ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPAIOP_RSP_GET_TIME_OF_DAY(cmd, *time_of_day); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpbp.c b/drivers/net/dpaa2/mc/dpbp.c -new file mode 100644 -index 0000000..87899b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpbp.c -@@ -0,0 +1,432 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPBP_CMD_OPEN(cmd, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t 
cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpci.c b/drivers/net/dpaa2/mc/dpci.c -new file mode 100644 -index 0000000..2ec02a1 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpci.c -@@ -0,0 +1,501 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpci_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCI_CMD_OPEN(cmd, dpci_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpci_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_peer_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_PEER_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *up) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_LINK_STATE(cmd, *up); -+ -+ return 0; -+} -+ -+int dpci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpci_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_RX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPCI_CMD_GET_TX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpcon.c b/drivers/net/dpaa2/mc/dpcon.c -new file mode 100644 -index 0000000..56dbcf7 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpcon.c -@@ -0,0 +1,400 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCON_CMD_OPEN(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCON_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t 
cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr) -+{ -+ struct mc_command cmd = 
{ 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/dpdbg.c b/drivers/net/dpaa2/mc/dpdbg.c -new file mode 100644 -index 0000000..6f2a08d ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdbg.c -@@ -0,0 +1,547 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpdbg_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdbg_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDBG_CMD_OPEN(cmd, dpdbg_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpdbg_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint8_t sender_id, -+ uint32_t *fqid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, *fqid); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCON_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPCON_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpbp_id, -+ struct dpdbg_dpbp_info *info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response 
parameters */ -+ DPDBG_RSP_GET_DPBP_INFO(cmd, info); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpci_id, -+ uint8_t priority, -+ uint32_t *fqid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPCI_FQID(cmd, *fqid); -+ -+ return 0; -+} -+ -+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule, -+ uint8_t *rule_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)rule_buf; -+ -+ if (!rule_buf || !dpkg_rule) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, dpkg_rule->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (dpkg_rule->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < dpkg_rule->num_extracts; i++) { -+ switch (dpkg_rule->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ dpkg_rule->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract.from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ dpkg_rule->extracts[i].extract.from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ dpkg_rule->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_data.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ dpkg_rule->extracts[i].extract.from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ dpkg_rule->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, dpkg_rule->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, dpkg_rule->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, -+ dpkg_rule->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ dpkg_rule->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t marking, -+ struct dpdbg_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_marking_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ uint8_t marking) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ uint8_t marking) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_MARKING, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ struct dpdbg_dpio_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg) -+{ -+ struct 
mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPCON_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpseci_id, -+ struct dpdbg_dpseci_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPSECI_TRACE, -+ cmd_flags, -+ token); -+ DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpmac_id, -+ enum dpmac_counter counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPMAC_COUNTER, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPMAC_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ enum dpni_counter counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_COUNTER, -+ cmd_flags, -+ token); -+ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpni_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDBG_RSP_GET_DPNI_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdcei.c b/drivers/net/dpaa2/mc/dpdcei.c -new file mode 100644 -index 0000000..a5c4c47 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdcei.c -@@ -0,0 +1,449 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpdcei_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdcei_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDCEI_CMD_OPEN(cmd, dpdcei_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdcei_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdcei_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDCEI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdcei_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_disable(struct 
fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdcei_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdcei_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdcei_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdcei_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ 
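
Every wrapper in these dpdbg/dpdcei/dpdmai/dpdmux/dpio/dpmac/dpmcp files repeats the same three-step flow: encode a command header, send it to the Management Complex, and (for get-style calls) unpack the response. The sketch below is illustrative only and is not part of the patch; `example_mc_call` and `example_cmdid` are hypothetical names, and it assumes the driver's mc_cmd/mc_sys declarations are in scope (the `#include` file names are not visible in the patch text above).

    /*
     * Illustrative sketch of the MC command pattern used throughout
     * these files -- not part of the patch.
     */
    static int example_mc_call(struct fsl_mc_io *mc_io,
                               uint32_t cmd_flags,
                               uint16_t token,
                               uint16_t example_cmdid)
    {
            struct mc_command cmd = { 0 };
            int err;

            /* 1. prepare command: pack command id, flags and object token */
            cmd.header = mc_encode_cmd_header(example_cmdid, cmd_flags, token);
            /* (an object-specific DPXXX_CMD_*() macro would add parameters here) */

            /* 2. send command to the Management Complex */
            err = mc_send_command(mc_io, &cmd);
            if (err)
                    return err;

            /* 3. on success, a matching DPXXX_RSP_*() macro unpacks the response */
            return 0;
    }
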
-+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpdcei_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDCEI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdmai.c b/drivers/net/dpaa2/mc/dpdmai.c -new file mode 100644 -index 0000000..154d2c6 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdmai.c -@@ -0,0 +1,452 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpdmai_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmai_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDMAI_CMD_OPEN(cmd, dpdmai_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmai_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmai_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMAI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ 
-+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct 
dpdmai_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, struct dpdmai_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c -new file mode 100644 -index 0000000..dc07608 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpdmux.c -@@ -0,0 +1,567 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_OPEN(cmd, dpdmux_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMUX_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status); -+ 
-+ return 0; -+} -+ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpio.c b/drivers/net/dpaa2/mc/dpio.c -new file mode 100644 -index 0000000..f511e29 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpio.c -@@ -0,0 +1,468 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPIO_CMD_OPEN(cmd, dpio_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPIO_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); -+ -+ return 0; -+} -+ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); -+ -+ return 0; -+} -+ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/dpmac.c b/drivers/net/dpaa2/mc/dpmac.c -new file mode 100644 -index 0000000..f31d949 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpmac.c -@@ -0,0 +1,422 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_OPEN(cmd, dpmac_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ 
struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, 
-+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_READ(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_MDIO_READ(cmd, cfg->data); -+ -+ return 0; -+} -+ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_WRITE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_LINK_CFG(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_COUNTER(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpmcp.c b/drivers/net/dpaa2/mc/dpmcp.c -new file mode 100644 -index 0000000..dfd84b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpmcp.c -@@ -0,0 +1,312 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMCP_CMD_OPEN(cmd, dpmcp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMCP_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ 
uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMCP_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpmng.c b/drivers/net/dpaa2/mc/dpmng.c -new file mode 100644 -index 
0000000..cac5ba5 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpmng.c -@@ -0,0 +1,58 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMNG_RSP_GET_VERSION(cmd, mc_ver_info); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c -new file mode 100644 -index 0000000..cdd2f37 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpni.c -@@ -0,0 +1,1907 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
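mc_get_version() above is the simplest wrapper in the set: no token, no request parameters, a single response descriptor. A hedged sketch of how a probe path might log the firmware version follows; the header names and the major/minor/revision fields of struct mc_version are assumptions inferred from the call, not shown in this hunk.

/* Hedged sketch, not part of the patch: log the MC firmware version.
 * Header names and struct mc_version field names are assumed.
 */
#include <stdio.h>
#include <fsl_mc_sys.h>
#include <fsl_dpmng.h>

static int log_mc_version(struct fsl_mc_io *mc_io)
{
        struct mc_version ver = { 0 };
        int err;

        err = mc_get_version(mc_io, 0 /* cmd_flags */, &ver);
        if (err)
                return err;

        printf("MC firmware version %u.%u.%u\n",
               ver.major, ver.minor, ver.revision);
        return 0;
}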
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)key_cfg_buf; -+ -+ if (!key_cfg_buf || !cfg) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, cfg->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < cfg->num_extracts; i++) { -+ switch (cfg->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ cfg->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ cfg->extracts[i].extract. -+ from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_data.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, cfg->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, cfg->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ cfg->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; -+ -+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPNI_CMD_OPEN(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPNI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_POOLS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ 
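As an aside, the dpni IRQ wrappers being defined here mirror the dpmac and dpmcp ones and are meant to be used as a set: program the message address, unmask the events of interest, then enable the index. A hedged sketch follows; the addr/val/irq_num fields of struct dpni_irq_cfg, the mask semantics, and the header names are assumptions, while the wrapper names come from this file.

/* Hedged sketch, not part of the patch: arm DPNI IRQ index 0.
 * struct dpni_irq_cfg field names (addr, val, irq_num) are assumed,
 * as is the "1 bit = event may raise the IRQ" meaning of the mask.
 */
#include <stdint.h>
#include <fsl_mc_sys.h>
#include <fsl_dpni.h>

static int dpni_arm_irq(struct fsl_mc_io *mc_io, uint16_t token,
                        uint64_t msg_addr, uint32_t msg_val)
{
        struct dpni_irq_cfg irq_cfg = {
                .addr = msg_addr,       /* written by the MC on interrupt */
                .val = msg_val,         /* value written to that address */
                .irq_num = 0,
        };
        int err;

        err = dpni_set_irq(mc_io, 0, token, 0 /* irq_index */, &irq_cfg);
        if (err)
                return err;

        err = dpni_set_irq_mask(mc_io, 0, token, 0, ~0x0u);
        if (err)
                return err;

        return dpni_set_irq_enable(mc_io, 0, token, 0, 1);
}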
DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_ATTR(cmd, attr); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, -+ cmd_flags, -+ token); -+ -+ /* 
send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_QDID(cmd, *qdid); -+ -+ return 0; -+} -+ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_SP_INFO(cmd, sp_info); -+ -+ return 0; -+} -+ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); -+ -+ return 0; -+} -+ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_COUNTER(cmd, counter); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_COUNTER(cmd, *value); -+ -+ return 0; -+} -+ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_COUNTER(cmd, counter, value); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_LINK_CFG(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t 
max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); -+ -+ return 0; -+} -+ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MTU(cmd, mtu); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MTU(cmd, *mtu); -+ -+ return 0; -+} -+ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command 
cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ return 0; -+} -+ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct 
dpni_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); -+ -+ return 0; -+} -+ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ 
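Most of the configuration wrappers above take plain scalars, so typical PMD setup code reduces to a short run of MC calls. A hedged sketch using only functions defined earlier in this file follows; the header names are assumptions and error handling is kept minimal.

/* Hedged sketch, not part of the patch: basic DPNI settings via the
 * scalar wrappers defined above (header names assumed).
 */
#include <stdint.h>
#include <fsl_mc_sys.h>
#include <fsl_dpni.h>

static int dpni_basic_setup(struct fsl_mc_io *mc_io, uint16_t token,
                            const uint8_t mac[6], uint16_t max_frame_len)
{
        int err;

        err = dpni_set_primary_mac_addr(mc_io, 0, token, mac);
        if (err)
                return err;

        err = dpni_set_max_frame_length(mc_io, 0, token, max_frame_len);
        if (err)
                return err;

        /* Accept all multicast traffic; one MC round-trip per call. */
        return dpni_set_multicast_promisc(mc_io, 0, token, 1);
}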
-+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPR(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPF(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); -+ -+ return 0; -+} -+ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; -+ -+ DPNI_EXT_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ 
-+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dprc.c b/drivers/net/dpaa2/mc/dprc.c -new file mode 100644 -index 0000000..75c6a68 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dprc.c -@@ -0,0 +1,786 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
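Taken together, the dpni wrappers above give the PMD a complete open/reset/enable/disable/close lifecycle against the MC, and the dprc object that follows supplies the container scan that yields the object ids. A hedged lifecycle sketch follows; only functions shown earlier in this file are used, header names are assumed, and the dpni_id would normally come from the container scan rather than a constant.

/* Hedged sketch, not part of the patch: one DPNI open/enable/close cycle
 * built from the wrappers above (header names assumed).
 */
#include <fsl_mc_sys.h>
#include <fsl_dpni.h>

static int dpni_cycle(struct fsl_mc_io *mc_io, int dpni_id)
{
        uint16_t token;
        int err;

        err = dpni_open(mc_io, 0 /* cmd_flags */, dpni_id, &token);
        if (err)
                return err;

        err = dpni_reset(mc_io, 0, token);      /* start from a clean state */
        if (!err)
                err = dpni_enable(mc_io, 0, token);

        /* ... rx/tx queues would be wired up and traffic run here ... */

        if (!err)
                err = dpni_disable(mc_io, 0, token);

        dpni_close(mc_io, 0, token);
        return err;
}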
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dprc_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_CONTAINER_ID(cmd, *container_id); -+ -+ return 0; -+} -+ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, -+ 0); -+ DPRC_CMD_OPEN(cmd, container_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_paddr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ DPRC_CMD_CREATE_CONTAINER(cmd, cfg); -+ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_CREATE_CONTAINER(cmd, *child_container_id, -+ *child_portal_paddr); -+ -+ return 0; -+} -+ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, -+ cmd_flags, -+ token); -+ DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, -+ cmd_flags, -+ token); -+ DPRC_CMD_RESET_CONTAINER(cmd, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ 
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command 
to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_QUOTA(cmd, *quota); -+ -+ return 0; -+} -+ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, -+ cmd_flags, -+ token); -+ DPRC_CMD_ASSIGN(cmd, container_id, res_req); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, -+ cmd_flags, -+ token); -+ DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_POOL_COUNT(cmd, *pool_count); -+ -+ return 0; -+} -+ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_POOL(cmd, pool_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_POOL(cmd, type); -+ -+ return 0; -+} -+ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count) -+{ -+ struct mc_command cmd = { 0 
}; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count); -+ -+ return 0; -+} -+ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ(cmd, obj_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ(cmd, obj_desc); -+ -+ return 0; -+} -+ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc); -+ -+ return 0; -+} -+ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_OBJ_IRQ(cmd, -+ obj_type, -+ obj_id, -+ irq_index, -+ irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ *res_count = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_COUNT(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_COUNT(cmd, *res_count); -+ -+ return 0; -+} -+ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_RES_IDS(cmd, range_desc, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) 
-+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_RES_IDS(cmd, range_desc); -+ -+ return 0; -+} -+ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_OBJ_REGION(cmd, region_desc); -+ -+ return 0; -+} -+ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, -+ cmd_flags, -+ token); -+ DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, -+ cmd_flags, -+ token); -+ DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, -+ cmd_flags, -+ token); -+ DPRC_CMD_DISCONNECT(cmd, endpoint); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, -+ cmd_flags, -+ token); -+ DPRC_CMD_GET_CONNECTION(cmd, endpoint1); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRC_RSP_GET_CONNECTION(cmd, endpoint2, *state); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c -new file mode 100644 -index 0000000..73667af ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dprtc.c -@@ -0,0 +1,509 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+int dprtc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dprtc_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPRTC_CMD_OPEN(cmd, dprtc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dprtc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dprtc_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dprtc_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ 
return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dprtc_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprtc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprtc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = 
mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dprtc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprtc_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int64_t offset) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t freq_compensation) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *freq_compensation) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, *freq_compensation); -+ -+ return 0; -+} -+ -+int dprtc_get_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPRTC_RSP_GET_TIME(cmd, *time); -+ -+ return 0; -+} -+ -+int dprtc_set_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint64_t time) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_TIME(cmd, time); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dprtc_set_alarm(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, uint64_t time) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, -+ cmd_flags, -+ token); -+ -+ DPRTC_CMD_SET_ALARM(cmd, time); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/dpseci.c b/drivers/net/dpaa2/mc/dpseci.c -new file mode 100644 -index 0000000..a4b932a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpseci.c -@@ -0,0 +1,502 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
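The DPRTC commands above give software a handle on the hardware real-time clock: open the object by id, set or read the 64-bit counter, trim it with a frequency-compensation value, and close. A minimal usage sketch, assuming an initialized MC portal and a fsl_dprtc.h header with these prototypes (the #include targets are stripped in this hunk); cmd_flags is 0 and the object id is supplied by the caller.

#include <stdint.h>
#include <stdio.h>
#include <fsl_dprtc.h>	/* assumed header name for the dprtc_* prototypes */

static int rtc_example(struct fsl_mc_io *mc_io, int dprtc_id)
{
	uint16_t token;
	uint64_t now = 0;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);	/* token names the open object */
	if (err)
		return err;

	err = dprtc_set_time(mc_io, 0, token, 0);	/* restart the counter at zero */
	if (!err)
		err = dprtc_get_time(mc_io, 0, token, &now);
	if (!err)
		printf("DPRTC time: %llu\n", (unsigned long long)now);

	dprtc_close(mc_io, 0, token);			/* always release the token */
	return err;
}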
-+ */ -+#include -+#include -+#include -+#include -+ -+int dpseci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpseci_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPSECI_CMD_OPEN(cmd, dpseci_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpseci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpseci_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPSECI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpseci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpseci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpseci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; 
-+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpseci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpseci_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct 
dpseci_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ const struct dpseci_rx_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_rx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_RX_QUEUE(cmd, queue); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_RX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_tx_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ DPSECI_CMD_GET_TX_QUEUE(cmd, queue); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_TX_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_SEC_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_counters *counters) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/mc/dpsw.c b/drivers/net/dpaa2/mc/dpsw.c -new file mode 100644 -index 0000000..2034b55 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/dpsw.c -@@ -0,0 +1,1639 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+ -+/* internal functions */ -+static void build_if_id_bitmap(const uint16_t *if_id, -+ const uint16_t num_ifs, -+ struct mc_command *cmd, -+ int start_param) -+{ -+ int i; -+ -+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) -+ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc( -+ (if_id[i] % 64), 1, 1); -+} -+ -+static int read_if_id_bitmap(uint16_t *if_id, -+ uint16_t *num_ifs, -+ struct mc_command *cmd, -+ int start_param) -+{ -+ int bitmap[DPSW_MAX_IF] = { 0 }; -+ int i, j = 0; -+ int count = 0; -+ -+ for (i = 0; i < DPSW_MAX_IF; i++) { -+ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64], -+ i % 64, 1); -+ count += bitmap[i]; -+ } -+ -+ *num_ifs = (uint16_t)count; -+ -+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) { -+ if (bitmap[i]) { -+ if_id[j] = (uint16_t)i; -+ j++; -+ } -+ } -+ -+ return 0; -+} -+ -+/* DPSW APIs */ -+int dpsw_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpsw_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPSW_CMD_OPEN(cmd, dpsw_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpsw_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpsw_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPSW_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ 
-+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ 
DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint16_t if_id, -+ struct dpsw_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_TCI(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_TCI(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io 
*mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -+ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING, -+ cmd_flags, -+ token); -+ 
DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPSW_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID, -+ cmd_flags, -+ token); -+ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_ENABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_DISABLE(cmd, if_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_ATTR(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int 
err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length); -+ -+ return 0; -+} -+ -+int dpsw_vlan_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = 
mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED, -+ cmd_flags, -+ token); -+ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_VLAN_GET_IF(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1); -+ -+ return 0; -+} -+ -+int dpsw_fdb_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *fdb_id, -+ const struct dpsw_fdb_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_ADD(cmd, *fdb_id); -+ -+ return 0; -+} -+ -+int 
dpsw_fdb_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg); -+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2); -+ -+ return 0; -+} -+ -+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, 
-+ uint16_t fdb_id, -+ enum dpsw_fdb_learning_mode mode) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_FDB_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_acl_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *acl_id, -+ const struct dpsw_acl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_ADD(cmd, *acl_id); -+ -+ return 0; -+} -+ -+int dpsw_acl_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE(cmd, acl_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, -+ uint8_t *entry_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)entry_cfg_buf; -+ -+ DPSW_PREP_ACL_ENTRY(ext_params, key); -+} -+ -+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1); -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ struct dpsw_acl_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR, -+ cmd_flags, -+ token); -+ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_ACL_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_ctrl_if_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_ctrl_if_pools_cfg *pools) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS, -+ cmd_flags, -+ token); -+ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+/** -+ * dpsw_ctrl_if_disable() - Disable the control interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop.h b/drivers/net/dpaa2/mc/fsl_dpaiop.h -new file mode 100644 -index 0000000..b039b2a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpaiop.h -@@ -0,0 +1,494 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAIOP_H -+#define __FSL_DPAIOP_H -+ -+struct fsl_mc_io; -+ -+/* Data Path AIOP API -+ * Contains initialization APIs and runtime control APIs for DPAIOP -+ */ -+ -+/** -+ * dpaiop_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpaiop_id: DPAIOP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpaiop_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpaiop_id, -+ uint16_t *token); -+ -+/** -+ * dpaiop_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_close(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * struct dpaiop_cfg - Structure representing DPAIOP configuration -+ * @aiop_id: AIOP ID -+ * @aiop_container_id: AIOP container ID -+ */ -+struct dpaiop_cfg { -+ int aiop_id; -+ int aiop_container_id; -+}; -+ -+/** -+ * dpaiop_create() - Create the DPAIOP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPAIOP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpaiop_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpaiop_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpaiop_destroy() - Destroy the DPAIOP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpaiop_destroy(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * dpaiop_reset() - Reset the DPAIOP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_reset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); -+ -+/** -+ * struct dpaiop_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpaiop_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpaiop_set_irq() - Set IRQ information for the DPAIOP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpaiop_irq_cfg *irq_cfg); -+ -+/** -+ * dpaiop_get_irq() - Get IRQ information from the DPAIOP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpaiop_irq_cfg *irq_cfg); -+ -+/** -+ * dpaiop_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise.
-+ */ -+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpaiop_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpaiop_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpaiop_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpaiop_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpaiop_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpaiop_attr - Structure representing DPAIOP attributes -+ * @id: AIOP ID -+ * @version: DPAIOP version -+ */ -+struct dpaiop_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPAIOP version -+ * @major: DPAIOP major version -+ * @minor: DPAIOP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpaiop_get_attributes() - Retrieve DPAIOP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_attr *attr); -+ -+/** -+ * struct dpaiop_load_cfg - AIOP load configuration -+ * @options: AIOP load options -+ * @img_iova: I/O virtual address of AIOP ELF image -+ * @img_size: Size of AIOP ELF image in memory (in bytes) -+ */ -+struct dpaiop_load_cfg { -+ uint64_t options; -+ uint64_t img_iova; -+ uint32_t img_size; -+}; -+ -+/** -+ * dpaiop_load() - Load an image to AIOP -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @cfg: AIOP load configurations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_load(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_load_cfg *cfg); -+ -+#define DPAIOP_RUN_OPT_DEBUG 0x0000000000000001ULL -+ -+/** -+ * struct dpaiop_run_cfg - AIOP run configuration -+ * @cores_mask: Mask of AIOP cores to run (core 0 in most significant bit) -+ * @options: Execution options; see 'DPAIOP_RUN_OPT_DEBUG' -+ * @args_iova: I/O virtual address of AIOP arguments -+ * @args_size: Size of AIOP arguments in memory (in bytes) -+ */ -+struct dpaiop_run_cfg { -+ uint64_t cores_mask; -+ uint64_t options; -+ uint64_t args_iova; -+ uint32_t args_size; -+}; -+ -+/** -+ * dpaiop_run() - Start AIOP execution -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @cfg: AIOP run configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_run(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpaiop_run_cfg *cfg); -+ -+/** -+ * struct dpaiop_sl_version - AIOP SL (Service Layer) version -+ * @major: AIOP SL major version number -+ * @minor: AIOP SL minor version number -+ * @revision: AIOP SL revision number -+ */ -+struct dpaiop_sl_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * dpaiop_get_sl_version() - Get AIOP SL (Service Layer) version -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @version: AIOP SL version number -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpaiop_sl_version *version); -+ -+/** -+ * AIOP states -+ * -+ * AIOP internal states; they can be retrieved by calling the dpaiop_get_state() routine -+ */ -+ -+/** -+ * AIOP reset successfully completed.
-+ */ -+#define DPAIOP_STATE_RESET_DONE 0x00000000 -+/** -+ * AIOP reset is ongoing. -+ */ -+#define DPAIOP_STATE_RESET_ONGOING 0x00000001 -+ -+/** -+ * AIOP image loading successfully completed. -+ */ -+#define DPAIOP_STATE_LOAD_DONE 0x00000002 -+/** -+ * AIOP image loading is ongoing. -+ */ -+#define DPAIOP_STATE_LOAD_ONGIONG 0x00000004 -+/** -+ * AIOP image loading completed with error. -+ */ -+#define DPAIOP_STATE_LOAD_ERROR 0x00000008 -+ -+/** -+ * Boot process of AIOP cores is ongoing. -+ */ -+#define DPAIOP_STATE_BOOT_ONGOING 0x00000010 -+/** -+ * Boot process of AIOP cores completed with an error. -+ */ -+#define DPAIOP_STATE_BOOT_ERROR 0x00000020 -+/** -+ * AIOP cores are functional and running -+ */ -+#define DPAIOP_STATE_RUNNING 0x00000040 -+/** @} */ -+ -+/** -+ * dpaiop_get_state() - Get AIOP state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @state: AIOP state -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *state); -+ -+/** -+ * dpaiop_set_time_of_day() - Set AIOP internal time-of-day -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @time_of_day: Current number of milliseconds since the Epoch -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time_of_day); -+ -+/** -+ * dpaiop_get_time_of_day() - Get AIOP internal time-of-day -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPAIOP object -+ * @time_of_day: Current number of milliseconds since the Epoch -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time_of_day); -+ -+#endif /* __FSL_DPAIOP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h -new file mode 100644 -index 0000000..5b77bb8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h -@@ -0,0 +1,190 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPAIOP_CMD_H -+#define _FSL_DPAIOP_CMD_H -+ -+/* DPAIOP Version */ -+#define DPAIOP_VER_MAJOR 1 -+#define DPAIOP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPAIOP_CMDID_CLOSE 0x800 -+#define DPAIOP_CMDID_OPEN 0x80a -+#define DPAIOP_CMDID_CREATE 0x90a -+#define DPAIOP_CMDID_DESTROY 0x900 -+ -+#define DPAIOP_CMDID_GET_ATTR 0x004 -+#define DPAIOP_CMDID_RESET 0x005 -+ -+#define DPAIOP_CMDID_SET_IRQ 0x010 -+#define DPAIOP_CMDID_GET_IRQ 0x011 -+#define DPAIOP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPAIOP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPAIOP_CMDID_SET_IRQ_MASK 0x014 -+#define DPAIOP_CMDID_GET_IRQ_MASK 0x015 -+#define DPAIOP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPAIOP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPAIOP_CMDID_LOAD 0x280 -+#define DPAIOP_CMDID_RUN 0x281 -+#define DPAIOP_CMDID_GET_SL_VERSION 0x282 -+#define DPAIOP_CMDID_GET_STATE 0x283 -+#define DPAIOP_CMDID_SET_TIME_OF_DAY 0x284 -+#define DPAIOP_CMDID_GET_TIME_OF_DAY 0x285 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_OPEN(cmd, dpaiop_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpaiop_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->aiop_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->aiop_container_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_LOAD(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->img_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->img_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_RUN(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->args_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->cores_mask); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->args_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_SL_VERSION(cmd, version) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, version->major);\ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, version->minor);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, version->revision);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_STATE(cmd, state) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time_of_day) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPAIOP_RSP_GET_TIME_OF_DAY(cmd, time_of_day) \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time_of_day) -+ -+#endif /* _FSL_DPAIOP_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpbp.h b/drivers/net/dpaa2/mc/fsl_dpbp.h -new file mode 100644 -index 0000000..9856bb8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpbp.h -@@ -0,0 +1,438 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPBP_H -+#define __FSL_DPBP_H -+ -+/* Data Path Buffer Pool API -+ * Contains initialization APIs and runtime control APIs for DPBP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpbp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpbp_id: DPBP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpbp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token); -+ -+/** -+ * dpbp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_cfg - Structure representing DPBP configuration -+ * @options: place holder -+ */ -+struct dpbp_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dpbp_create() - Create the DPBP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPBP object, allocate required resources and -+ * perform required initialization. 
-+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpbp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpbp_destroy() - Destroy the DPBP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_enable() - Enable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_disable() - Disable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_is_enabled() - Check if the DPBP is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpbp_reset() - Reset the DPBP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpbp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_get_irq() - Get IRQ information from the DPBP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable controls the -+ * overall interrupt state. If the interrupt is disabled, no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpbp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpbp_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpbp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpbp_get_irq_status() - Get the current status of any pending interrupts.
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpbp_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpbp_attr - Structure representing DPBP attributes -+ * @id: DPBP object ID -+ * @version: DPBP version -+ * @bpid: Hardware buffer pool ID; should be used as an argument in -+ * acquire/release operations on buffers -+ */ -+struct dpbp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPBP version -+ * @major: DPBP major version -+ * @minor: DPBP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t bpid; -+}; -+ -+/** -+ * dpbp_get_attributes() - Retrieve DPBP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr); -+ -+/** -+ * DPBP notification options -+ */ -+ -+/** -+ * BPSCN write will attempt to allocate into a cache (coherent write) -+ */ -+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 -+ -+/** -+ * struct dpbp_notification_cfg - Structure representing DPBP notifications -+ * towards software -+ * @depletion_entry: below this threshold the pool is "depleted"; -+ * set it to '0' to disable it -+ * @depletion_exit: greater than or equal to this threshold the pool exits its -+ * "depleted" state -+ * @surplus_entry: above this threshold the pool is in "surplus" state; -+ * set it to '0' to disable it -+ * @surplus_exit: less than or equal to this threshold the pool exits its -+ * "surplus" state -+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' -+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned.
-+ * @message_ctx: The context that will be part of the BPSCN message and will -+ * be written to 'message_iova' -+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values -+ */ -+struct dpbp_notification_cfg { -+ uint32_t depletion_entry; -+ uint32_t depletion_exit; -+ uint32_t surplus_entry; -+ uint32_t surplus_exit; -+ uint64_t message_iova; -+ uint64_t message_ctx; -+ uint16_t options; -+}; -+ -+/** -+ * dpbp_set_notifications() - Set notifications towards software -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+/** -+ * dpbp_get_notifications() - Get the notifications configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+#endif /* __FSL_DPBP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h -new file mode 100644 -index 0000000..71ad96a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h -@@ -0,0 +1,172 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPBP_CMD_H -+#define _FSL_DPBP_CMD_H -+ -+/* DPBP Version */ -+#define DPBP_VER_MAJOR 2 -+#define DPBP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPBP_CMDID_CLOSE 0x800 -+#define DPBP_CMDID_OPEN 0x804 -+#define DPBP_CMDID_CREATE 0x904 -+#define DPBP_CMDID_DESTROY 0x900 -+ -+#define DPBP_CMDID_ENABLE 0x002 -+#define DPBP_CMDID_DISABLE 0x003 -+#define DPBP_CMDID_GET_ATTR 0x004 -+#define DPBP_CMDID_RESET 0x005 -+#define DPBP_CMDID_IS_ENABLED 0x006 -+ -+#define DPBP_CMDID_SET_IRQ 0x010 -+#define DPBP_CMDID_GET_IRQ 0x011 -+#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPBP_CMDID_SET_IRQ_MASK 0x014 -+#define DPBP_CMDID_GET_IRQ_MASK 0x015 -+#define DPBP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_OPEN(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->bpid); \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\ -+} while (0) -+#endif /* _FSL_DPBP_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpci.h b/drivers/net/dpaa2/mc/fsl_dpci.h -new file mode 100644 -index 0000000..d885935 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpci.h -@@ -0,0 +1,594 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
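The DPBP_CMD_*/DPBP_RSP_* macros above are read against the "(cmd, param, offset, width, type, arg_name)" column convention: each MC_CMD_OP()/MC_RSP_OP() invocation packs or extracts one field at a given bit offset and width inside a 64-bit parameter word of the MC command descriptor. The real MC_CMD_OP/MC_RSP_OP definitions live in the common MC command header, which is not part of this hunk; the standalone sketch below (mc_enc/mc_dec are hypothetical names) only illustrates the packing idea, using DPBP_CMD_SET_IRQ_ENABLE as the example.

#include <stdint.h>

/* Hypothetical helpers illustrating the (offset, width) packing convention;
 * not the driver's actual MC_CMD_OP/MC_RSP_OP implementation. */
static inline uint64_t mc_enc(int offset, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (val & mask) << offset;
}

static inline uint64_t mc_dec(uint64_t word, int offset, int width)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (word >> offset) & mask;
}

/* With that convention, DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) amounts to
 *	params[0] |= mc_enc(0, 8, en) | mc_enc(32, 8, irq_index);
 * and DPBP_RSP_GET_IRQ_STATUS(cmd, status) to
 *	status = (uint32_t)mc_dec(params[0], 0, 32);
 */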
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPCI_H -+#define __FSL_DPCI_H -+ -+/* Data Path Communication Interface API -+ * Contains initialization APIs and runtime control APIs for DPCI -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCI macros */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPCI object -+ */ -+#define DPCI_PRIO_NUM 2 -+ -+/** -+ * Indicates an invalid frame queue -+ */ -+#define DPCI_FQID_NOT_VALID (uint32_t)(-1) -+ -+/** -+ * All queues considered; see dpci_set_rx_queue() -+ */ -+#define DPCI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpci_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpci_id: DPCI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpci_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpci_id, -+ uint16_t *token); -+ -+/** -+ * dpci_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpci_cfg - Structure representing DPCI configuration -+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI; -+ * note, that the number of transmit priorities (queues) -+ * is determined by the number of receive priorities of -+ * the peer DPCI object -+ */ -+struct dpci_cfg { -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_create() - Create the DPCI object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCI object, allocate required resources and perform required -+ * initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
For objects that are created using the -+ * DPL file, call dpci_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpci_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpci_destroy() - Destroy the DPCI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpci_is_enabled() - Check if the DPCI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpci_reset() - Reset the DPCI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** DPCI IRQ Index and Events */ -+ -+/** -+ * IRQ index -+ */ -+#define DPCI_IRQ_INDEX 0 -+ -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPCI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+/** -+ * IRQ event - indicates a connection event -+ */ -+#define DPCI_IRQ_EVENT_CONNECTED 0x00000002 -+/** -+ * IRQ event - indicates a disconnection event -+ */ -+#define DPCI_IRQ_EVENT_DISCONNECTED 0x00000004 -+ -+/** -+ * struct dpci_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpci_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpci_set_irq() - Set IRQ information for the DPCI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
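A short bring-up sketch (not from the patch) tying the calls above together: open an existing DPCI object, enable it, and confirm the state before handing the token back to the caller. The example_* name and the -1 fallback error value are placeholders.

static int example_dpci_bring_up(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
				 int dpci_id, uint16_t *token)
{
	int en = 0;
	int err;

	err = dpci_open(mc_io, cmd_flags, dpci_id, token);
	if (err)
		return err;

	err = dpci_enable(mc_io, cmd_flags, *token);
	if (!err)
		err = dpci_is_enabled(mc_io, cmd_flags, *token, &en);

	if (err || !en) {
		/* Roll back the control session on any failure. */
		dpci_close(mc_io, cmd_flags, *token);
		return err ? err : -1;	/* enabled state was not reached */
	}

	/* Keep the session open; 'token' is needed for all further commands. */
	return 0;
}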
-+ */ -+int dpci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpci_irq_cfg *irq_cfg); -+ -+/** -+ * dpci_get_irq() - Get IRQ information from the DPCI. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpci_irq_cfg *irq_cfg); -+ -+/** -+ * dpci_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpci_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpci_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpci_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpci_get_irq_status() - Get the current status of any pending interrupts. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpci_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpci_attr - Structure representing DPCI attributes -+ * @id: DPCI object ID -+ * @version: DPCI version -+ * @num_of_priorities: Number of receive priorities -+ */ -+struct dpci_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPCI attributes -+ * @major: DPCI major version -+ * @minor: DPCI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_get_attributes() - Retrieve DPCI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_attr *attr); -+ -+/** -+ * struct dpci_peer_attr - Structure representing the peer DPCI attributes -+ * @peer_id: DPCI peer id; if no peer is connected returns (-1) -+ * @num_of_priorities: The pper's number of receive priorities; determines the -+ * number of transmit priorities for the local DPCI object -+ */ -+struct dpci_peer_attr { -+ int peer_id; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @attr: Returned peer attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpci_peer_attr *attr); -+ -+/** -+ * dpci_get_link_state() - Retrieve the DPCI link state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @up: Returned link state; returns '1' if link is up, '0' otherwise -+ * -+ * DPCI can be connected to another DPCI, together they -+ * create a 'link'. In order to use the DPCI Tx and Rx queues, -+ * both objects must be enabled. -+ * -+ * Return: '0' on Success; Error code otherwise. 
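The IRQ calls follow the usual configure/enable/acknowledge pattern; the sketch below (not part of the patch) arms the DPCI interrupt for link changes only and shows the write-1-to-clear acknowledge step. msi_addr, msi_data and irq_num are assumed to come from the platform's message-interrupt setup.

static int example_dpci_arm_link_irq(struct fsl_mc_io *mc_io,
				     uint32_t cmd_flags, uint16_t token,
				     uint64_t msi_addr, uint32_t msi_data,
				     int irq_num)
{
	struct dpci_irq_cfg irq_cfg = {
		.addr = msi_addr,
		.val = msi_data,
		.irq_num = irq_num,
	};
	int err;

	err = dpci_set_irq(mc_io, cmd_flags, token, DPCI_IRQ_INDEX, &irq_cfg);
	if (err)
		return err;

	/* Assert the interrupt for link-change events only. */
	err = dpci_set_irq_mask(mc_io, cmd_flags, token, DPCI_IRQ_INDEX,
				DPCI_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpci_set_irq_enable(mc_io, cmd_flags, token, DPCI_IRQ_INDEX, 1);
}

static int example_dpci_read_and_ack_irq(struct fsl_mc_io *mc_io,
					 uint32_t cmd_flags, uint16_t token,
					 uint32_t *causes)
{
	int err;

	err = dpci_get_irq_status(mc_io, cmd_flags, token, DPCI_IRQ_INDEX,
				  causes);
	if (err)
		return err;

	/* Write-1-to-clear the causes that were just observed. */
	return dpci_clear_irq_status(mc_io, cmd_flags, token, DPCI_IRQ_INDEX,
				     *causes);
}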
-+ */ -+int dpci_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *up); -+ -+/** -+ * enum dpci_dest - DPCI destination types -+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is -+ * expected to dequeue from the queue based on polling or -+ * other user-defined method -+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected -+ * to dequeue from the queue only after notification is -+ * received -+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified -+ * DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpci_dest { -+ DPCI_DEST_NONE = 0, -+ DPCI_DEST_DPIO = 1, -+ DPCI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid -+ * values are 0-1 or 0-7, depending on the number of priorities -+ * in that channel; not relevant for 'DPCI_DEST_NONE' option -+ */ -+struct dpci_dest_cfg { -+ enum dpci_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** DPCI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPCI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPCI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in -+ * 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpci_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpci_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpci_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPCI creation; use -+ * DPCI_ALL_QUEUES to configure all Rx queues -+ * identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpci_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpci_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpci_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpci_get_rx_queue() - Retrieve Rx queue attributes. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPCI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_rx_queue_attr *attr); -+ -+/** -+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI; -+ * returns 'DPCI_FQID_NOT_VALID' if a no peer is connected or if -+ * the selected priority exceeds the number of priorities of the -+ * peer DPCI object -+ */ -+struct dpci_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpci_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @priority: Select the queue relative to number of -+ * priorities of the peer DPCI object -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpci_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPCI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h -new file mode 100644 -index 0000000..f45e435 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h -@@ -0,0 +1,200 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
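Closing out the fsl_dpci.h API, a hedged sketch (not from the patch) of the queue plumbing: every Rx priority is pointed at a DPCON channel, then the Tx FQID towards the peer is fetched and validated. dpcon_id and user_ctx are assumed to be supplied by the caller.

static int example_dpci_setup_queues(struct fsl_mc_io *mc_io,
				     uint32_t cmd_flags, uint16_t token,
				     int dpcon_id, uint64_t user_ctx,
				     uint32_t *tx_fqid)
{
	struct dpci_rx_queue_cfg rx_cfg = {
		.options = DPCI_QUEUE_OPT_USER_CTX | DPCI_QUEUE_OPT_DEST,
		.user_ctx = user_ctx,
		.dest_cfg = {
			.dest_type = DPCI_DEST_DPCON,
			.dest_id = dpcon_id,
			.priority = 0,
		},
	};
	struct dpci_tx_queue_attr tx_attr = { 0 };
	int err;

	/* DPCI_ALL_QUEUES configures every Rx priority identically. */
	err = dpci_set_rx_queue(mc_io, cmd_flags, token, DPCI_ALL_QUEUES,
				&rx_cfg);
	if (err)
		return err;

	err = dpci_get_tx_queue(mc_io, cmd_flags, token, 0, &tx_attr);
	if (err)
		return err;

	if (tx_attr.fqid == DPCI_FQID_NOT_VALID)
		return -1;	/* no peer DPCI connected yet */

	*tx_fqid = tx_attr.fqid;
	return 0;
}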
-+ */ -+#ifndef _FSL_DPCI_CMD_H -+#define _FSL_DPCI_CMD_H -+ -+/* DPCI Version */ -+#define DPCI_VER_MAJOR 2 -+#define DPCI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCI_CMDID_CLOSE 0x800 -+#define DPCI_CMDID_OPEN 0x807 -+#define DPCI_CMDID_CREATE 0x907 -+#define DPCI_CMDID_DESTROY 0x900 -+ -+#define DPCI_CMDID_ENABLE 0x002 -+#define DPCI_CMDID_DISABLE 0x003 -+#define DPCI_CMDID_GET_ATTR 0x004 -+#define DPCI_CMDID_RESET 0x005 -+#define DPCI_CMDID_IS_ENABLED 0x006 -+ -+#define DPCI_CMDID_SET_IRQ 0x010 -+#define DPCI_CMDID_GET_IRQ 0x011 -+#define DPCI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCI_CMDID_SET_IRQ_MASK 0x014 -+#define DPCI_CMDID_GET_IRQ_MASK 0x015 -+#define DPCI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCI_CMDID_SET_RX_QUEUE 0x0e0 -+#define DPCI_CMDID_GET_LINK_STATE 0x0e1 -+#define DPCI_CMDID_GET_PEER_ATTR 0x0e2 -+#define DPCI_CMDID_GET_RX_QUEUE 0x0e3 -+#define DPCI_CMDID_GET_TX_QUEUE 0x0e4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_OPEN(cmd, dpci_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, 
arg_name */ -+#define DPCI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_of_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_LINK_STATE(cmd, up) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, up) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPCI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpcon.h b/drivers/net/dpaa2/mc/fsl_dpcon.h -new file mode 100644 -index 0000000..2555be5 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpcon.h -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPCON_H -+#define __FSL_DPCON_H -+ -+/* Data Path Concentrator API -+ * Contains initialization APIs and runtime control APIs for DPCON -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCON macros */ -+ -+/** -+ * Use it to disable notifications; see dpcon_set_notification() -+ */ -+#define DPCON_INVALID_DPIO_ID (int)(-1) -+ -+/** -+ * dpcon_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpcon_id: DPCON unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpcon_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token); -+ -+/** -+ * dpcon_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_cfg - Structure representing DPCON configuration -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_cfg { -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_create() - Create the DPCON object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCON object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpcon_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpcon_destroy() - Destroy the DPCON object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_enable() - Enable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_disable() - Disable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_is_enabled() - Check if the DPCON is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpcon_reset() - Reset the DPCON, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpcon_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_get_irq() - Get IRQ information from the DPCON. 
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpcon_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpcon_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpcon_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpcon_get_irq_status() - Get the current status of any pending interrupts. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpcon_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpcon_attr - Structure representing DPCON attributes -+ * @id: DPCON object ID -+ * @version: DPCON version -+ * @qbman_ch_id: Channel ID to be used by dequeue operation -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_attr { -+ int id; -+ /** -+ * struct version - DPCON version -+ * @major: DPCON major version -+ * @minor: DPCON minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t qbman_ch_id; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_get_attributes() - Retrieve DPCON attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr); -+ -+/** -+ * struct dpcon_notification_cfg - Structure representing notification parameters -+ * @dpio_id: DPIO object ID; must be configured with a notification channel; -+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; -+ * @priority: Priority selection within the DPIO channel; valid values -+ * are 0-7, depending on the number of priorities in that channel -+ * @user_ctx: User context value provided with each CDAN message -+ */ -+struct dpcon_notification_cfg { -+ int dpio_id; -+ uint8_t priority; -+ uint64_t user_ctx; -+}; -+ -+/** -+ * dpcon_set_notification() - Set DPCON notification destination -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @cfg: Notification parameters -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg); -+ -+#endif /* __FSL_DPCON_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h -new file mode 100644 -index 0000000..ecb40d0 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h -@@ -0,0 +1,162 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
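Before the DPCON command header starts, a usage sketch for the notification API just declared (not part of the patch): route CDAN notifications from this DPCON channel to a DPIO, and read back the QBMan channel ID that the dequeue path needs. dpio_id and user_ctx are caller-supplied; passing DPCON_INVALID_DPIO_ID instead would disable notifications.

static int example_dpcon_route_cdan(struct fsl_mc_io *mc_io,
				    uint32_t cmd_flags, uint16_t token,
				    int dpio_id, uint64_t user_ctx)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id = dpio_id,	/* DPCON_INVALID_DPIO_ID disables CDANs */
		.priority = 0,		/* priority within the DPIO channel */
		.user_ctx = user_ctx,	/* delivered with every CDAN message */
	};

	return dpcon_set_notification(mc_io, cmd_flags, token, &cfg);
}

static int example_dpcon_channel_id(struct fsl_mc_io *mc_io,
				    uint32_t cmd_flags, uint16_t token,
				    uint16_t *ch_id)
{
	struct dpcon_attr attr = { 0 };
	int err;

	err = dpcon_get_attributes(mc_io, cmd_flags, token, &attr);
	if (!err)
		*ch_id = attr.qbman_ch_id;	/* used by dequeue operations */
	return err;
}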
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPCON_CMD_H -+#define _FSL_DPCON_CMD_H -+ -+/* DPCON Version */ -+#define DPCON_VER_MAJOR 2 -+#define DPCON_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCON_CMDID_CLOSE 0x800 -+#define DPCON_CMDID_OPEN 0x808 -+#define DPCON_CMDID_CREATE 0x908 -+#define DPCON_CMDID_DESTROY 0x900 -+ -+#define DPCON_CMDID_ENABLE 0x002 -+#define DPCON_CMDID_DISABLE 0x003 -+#define DPCON_CMDID_GET_ATTR 0x004 -+#define DPCON_CMDID_RESET 0x005 -+#define DPCON_CMDID_IS_ENABLED 0x006 -+ -+#define DPCON_CMDID_SET_IRQ 0x010 -+#define DPCON_CMDID_GET_IRQ 0x011 -+#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCON_CMDID_SET_IRQ_MASK 0x014 -+#define DPCON_CMDID_GET_IRQ_MASK 0x015 -+#define DPCON_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCON_CMDID_SET_NOTIFICATION 0x100 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_OPEN(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, 
offset, width, type, arg_name */ -+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+} while (0) -+ -+#endif /* _FSL_DPCON_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg.h b/drivers/net/dpaa2/mc/fsl_dpdbg.h -new file mode 100644 -index 0000000..ead22e8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdbg.h -@@ -0,0 +1,635 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDBG_H -+#define __FSL_DPDBG_H -+ -+#include -+#include -+#include -+ -+/* Data Path Debug API -+ * Contains initialization APIs and runtime control APIs for DPDBG -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpdbg_open() - Open a control session for the specified object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdbg_id: DPDBG unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdbg_id, -+ uint16_t *token); -+ -+/** -+ * dpdbg_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdbg_attr - Structure representing DPDBG attributes -+ * @id: DPDBG object ID -+ * @version: DPDBG version -+ */ -+struct dpdbg_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPDBG version -+ * @major: DPDBG major version -+ * @minor: DPDBG minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpdbg_get_attributes - Retrieve DPDBG attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_attr *attr); -+ -+/** -+ * struct dpdbg_dpni_info - Info of DPNI -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @qdid: Virtual QDID. -+ * @err_fqid: Virtual FQID for error queues -+ * @tx_conf_fqid: Virtual FQID for global TX confirmation queue -+ */ -+struct dpdbg_dpni_info { -+ uint8_t max_senders; -+ uint32_t qdid; -+ uint32_t err_fqid; -+ uint32_t tx_conf_fqid; -+}; -+ -+/** -+ * dpdbg_get_dpni_info() - Retrieve info for a specific DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @info: The returned info -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_info *info); -+ -+/** -+ * dpdbg_get_dpni_private_fqid() - Retrieve the virtual TX confirmation queue -+ * FQID of the required DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: The requested sender ID -+ * @fqid: The returned virtual private TX confirmation FQID. -+ * -+ * Return: '0' on Success; Error code otherwise. 
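A hedged sketch (not from the patch) of how the debug object is typically driven: open it by ID, pull the debug view of one DPNI, and close the session again. The example_* name is invented; dpdbg_id and dpni_id come from the caller.

static int example_dump_dpni_debug_info(struct fsl_mc_io *mc_io,
					uint32_t cmd_flags, int dpdbg_id,
					int dpni_id,
					struct dpdbg_dpni_info *info)
{
	uint16_t token;
	int err;

	err = dpdbg_open(mc_io, cmd_flags, dpdbg_id, &token);
	if (err)
		return err;

	/* On success, info->qdid, info->err_fqid and info->tx_conf_fqid hold
	 * the virtual IDs the debugged DPNI is using. */
	err = dpdbg_get_dpni_info(mc_io, cmd_flags, token, dpni_id, info);

	dpdbg_close(mc_io, cmd_flags, token);
	return err;
}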
-+ */ -+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint8_t sender_id, -+ uint32_t *fqid); -+ -+/** -+ * struct dpdbg_dpcon_info - Info of DPCON -+ * @ch_id: Channel ID -+ */ -+struct dpdbg_dpcon_info { -+ uint32_t ch_id; -+}; -+ -+/** -+ * dpdbg_get_dpcon_info() - Retrieve info of DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpcon_id: The requested DPCON ID -+ * @info: The returned info. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_info *info); -+ -+/** -+ * struct dpdbg_dpbp_info - Info of DPBP -+ * @bpid: Virtual buffer pool ID -+ */ -+struct dpdbg_dpbp_info { -+ uint32_t bpid; -+}; -+ -+/** -+ * dpdbg_get_dpbp_info() - Retrieve info of DPBP -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpbp_id: The requested DPBP ID -+ * @info: The returned info. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpbp_id, -+ struct dpdbg_dpbp_info *info); -+ -+/** -+ * dpdbg_get_dpci_fqid() - Retrieve the virtual FQID of the required DPCI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpci_id: The requested DPCI ID -+ * @priority: Select the queue relative to number of priorities configured at -+ * DPCI creation -+ * @fqid: The returned virtual FQID. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpci_id, -+ uint8_t priority, -+ uint32_t *fqid); -+ -+/** -+ * Maximum size for rule match (in bytes) -+ */ -+#define DPDBG_MAX_RULE_SIZE 56 -+/** -+ * Disable marking -+ */ -+#define DPDBG_DISABLE_MARKING 0xFF -+ -+/** -+ * dpdbg_prepare_ctlu_global_rule() - function prepare extract parameters -+ * @dpkg_rule: defining a full Key Generation profile (rule) -+ * @rule_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpdbg_set_global_marking() -+ */ -+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule, -+ uint8_t *rule_buf); -+ -+/** -+ * struct dpdbg_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @rule_iova: I/O virtual address of the rule (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpdbg_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint64_t rule_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpdbg_set_ctlu_global_marking() - Set marking for all match rule frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @marking: The requested Debug marking -+ * @cfg: Marking rule to add -+ * -+ * Warning: must be called after dpdbg_prepare_global_rule() -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t marking, -+ struct dpdbg_rule_cfg *cfg); -+ -+/** -+ * All traffic classes considered -+ */ -+#define DPDBG_DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered -+ */ -+#define DPDBG_DPNI_ALL_TC_FLOWS (uint8_t)(-1) -+/** -+ * All buffer pools considered -+ */ -+#define DPDBG_DPNI_ALL_DPBP (uint8_t)(-1) -+ -+/** -+ * struct dpdbg_dpni_rx_marking_cfg - Ingress frame configuration -+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes. -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id; -+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS'; -+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP -+ * @marking: Marking for match frames; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_rx_marking_cfg { -+ uint8_t tc_id; -+ uint16_t flow_id; -+ uint16_t dpbp_id; -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_rx_marking() - Set Rx frame marking for DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @cfg: RX frame marking configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_marking_cfg *cfg); -+ -+/* selects global confirmation queues */ -+#define DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE (uint16_t)(-1) -+ -+/** -+ * dpdbg_set_dpni_tx_conf_marking() - Set Tx frame marking for DPNI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: Sender Id for the confirmation queue; -+ * 'DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE' for global confirmation queue -+ * @marking: The requested marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ uint8_t marking); -+ -+/** -+ * dpdbg_set_dpio_marking() - Set debug frame marking on enqueue -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpio_id: The requested DPIO ID -+ * @marking: The requested marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ uint8_t marking); -+ -+/** -+ * enum dpdbg_verbosity_level - Trace verbosity level -+ * @DPDBG_VERBOSITY_LEVEL_DISABLE: Trace disabled -+ * @DPDBG_VERBOSITY_LEVEL_TERSE: Terse trace -+ * @DPDBG_VERBOSITY_LEVEL_VERBOSE: Verbose trace -+ */ -+enum dpdbg_verbosity_level { -+ DPDBG_VERBOSITY_LEVEL_DISABLE = 0, -+ DPDBG_VERBOSITY_LEVEL_TERSE, -+ DPDBG_VERBOSITY_LEVEL_VERBOSE -+}; -+ -+/** -+ * dpdbg_set_ctlu_global_trace() - Set global trace configuration for CTLU trace -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @cfg: trace rule to add -+ * -+ * Warning: must be called after dpdbg_prepare_global_rule() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdbg_rule_cfg *cfg); -+ -+/** -+ * Number of DPIO trace points -+ */ -+#define DPDBG_NUM_OF_DPIO_TRACE_POINTS 2 -+ -+/** -+ * enum dpdbg_dpio_trace_type - Define Trace point type -+ * @DPDBG_DPIO_TRACE_TYPE_ENQUEUE: This trace point triggers when an enqueue -+ * command, received via this portal, -+ * and containing a marked frame, is executed -+ * @DPDBG_DPIO_TRACE_TYPE_DEFERRED: This trace point triggers when the deferred -+ * enqueue of a marked frame received via this -+ * portal completes -+ */ -+enum dpdbg_dpio_trace_type { -+ DPDBG_DPIO_TRACE_TYPE_ENQUEUE = 0, -+ DPDBG_DPIO_TRACE_TYPE_DEFERRED = 1 -+}; -+ -+/** -+ * struct dpdbg_dpio_trace_cfg - Configure the behavior of a trace point -+ * when a frame marked with the specified DD code point is -+ * encountered -+ * @marking: this field will be written into the DD field of every FD -+ * enqueued in this DPIO. -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ * @enqueue_type: Enqueue trace point type defining a full Key Generation -+ * profile (rule) -+ */ -+struct dpdbg_dpio_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+ enum dpdbg_dpio_trace_type enqueue_type; -+}; -+ -+/** -+ * dpdbg_set_dpio_trace() - Set trace for DPIO for every enqueued frame to -+ * the portal -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpio_id: The requested DPIO ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpio_id, -+ struct dpdbg_dpio_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]); -+ -+/** -+ * struct dpdbg_dpni_trace_cfg - Configure the behavior of a trace point when a -+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes. -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id; -+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS'; -+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP -+ * @marking: Marking for match frames; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_rx_trace_cfg { -+ uint8_t tc_id; -+ uint16_t flow_id; -+ uint16_t dpbp_id; -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_rx_trace() - Set trace for DPNI ingress (WRIOP ingress). 
-+ * in case of multiple requests for different DPNIs - the trace -+ * will be for the latest DPNI requested. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @trace_cfg: Trace configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg); -+ -+/** -+ * All DPNI senders -+ */ -+#define DPDBG_DPNI_ALL_SENDERS (uint16_t)(-1) -+ -+/** -+ * struct dpdbg_dpni_trace_cfg - Configure the behavior of a trace point when a -+ * frame marked with the specified DD code point is encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ */ -+struct dpdbg_dpni_tx_trace_cfg { -+ uint8_t marking; -+}; -+ -+/** -+ * dpdbg_set_dpni_tx_trace() - Set trace for DPNI dequeued frames -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @sender_id: Sender ID; 'DPDBG_DPNI_ALL_SENDERS' for all senders -+ * @trace_cfg: Trace configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ uint16_t sender_id, -+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg); -+ -+/** -+ * Number of DPCON trace points -+ */ -+#define DPDBG_NUM_OF_DPCON_TRACE_POINTS 2 -+ -+/** -+ * struct dpdbg_dpcon_trace_cfg - Configure the behavior of a trace point when a -+ * frame marked with the specified DD code point is encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ */ -+struct dpdbg_dpcon_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+}; -+ -+/** -+ * dpdbg_set_dpcon_trace() - Set trace for DPCON when a frame marked with a -+ * specified marking is dequeued from a WQ in the -+ * channel selected -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpcon_id: The requested DPCON ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ struct dpdbg_dpcon_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]); -+ -+/** -+ * Number of DPSECI trace points -+ */ -+#define DPDBG_NUM_OF_DPSECI_TRACE_POINTS 2 -+ -+/** -+ * struct dpdbg_dpseci_trace_cfg - Configure the behavior of a trace point when -+ * a frame marked with the specified DD code point is -+ * encountered -+ * @marking: The requested debug marking; -+ * 'DPDBG_DISABLE_MARKING' for disable marking -+ * @verbosity: Verbosity level -+ */ -+struct dpdbg_dpseci_trace_cfg { -+ uint8_t marking; -+ enum dpdbg_verbosity_level verbosity; -+}; -+ -+/** -+ * dpdbg_set_dpseci_trace() - Set trace for DPSECI when a frame marked with the -+ * specific marking is enqueued via this portal. 
-+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpseci_id: The requested DPSECI ID -+ * @trace_point: Trace points configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpseci_id, -+ struct dpdbg_dpseci_trace_cfg -+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]); -+ -+/** -+ * dpdbg_get_dpmac_counter() - DPMAC packet throughput -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpmac_id: The requested DPMAC ID -+ * @counter_type: The requested DPMAC counter -+ * @counter: Returned counter value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpmac_id, -+ enum dpmac_counter counter_type, -+ uint64_t *counter); -+ -+/** -+ * dpdbg_get_dpni_counter() - DPNI packet throughput -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDBG object -+ * @dpni_id: The requested DPNI ID -+ * @counter_type: The requested DPNI counter -+ * @counter: Returned counter value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpni_id, -+ enum dpni_counter counter_type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPDBG_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h -new file mode 100644 -index 0000000..b672788 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h -@@ -0,0 +1,249 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDBG_CMD_H -+#define _FSL_DPDBG_CMD_H -+ -+/* DPDBG Version */ -+#define DPDBG_VER_MAJOR 1 -+#define DPDBG_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPDBG_CMDID_CLOSE 0x800 -+#define DPDBG_CMDID_OPEN 0x80F -+ -+#define DPDBG_CMDID_GET_ATTR 0x004 -+ -+#define DPDBG_CMDID_GET_DPNI_INFO 0x130 -+#define DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID 0x131 -+#define DPDBG_CMDID_GET_DPCON_INFO 0x132 -+#define DPDBG_CMDID_GET_DPBP_INFO 0x133 -+#define DPDBG_CMDID_GET_DPCI_FQID 0x134 -+ -+#define DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING 0x135 -+#define DPDBG_CMDID_SET_DPNI_RX_MARKING 0x136 -+#define DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING 0x137 -+#define DPDBG_CMDID_SET_DPIO_MARKING 0x138 -+ -+#define DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE 0x140 -+#define DPDBG_CMDID_SET_DPIO_TRACE 0x141 -+#define DPDBG_CMDID_SET_DPNI_RX_TRACE 0x142 -+#define DPDBG_CMDID_SET_DPNI_TX_TRACE 0x143 -+#define DPDBG_CMDID_SET_DPCON_TRACE 0x145 -+#define DPDBG_CMDID_SET_DPSECI_TRACE 0x146 -+ -+#define DPDBG_CMDID_GET_DPMAC_COUNTER 0x150 -+#define DPDBG_CMDID_GET_DPNI_COUNTER 0x151 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_OPEN(cmd, dpdbg_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdbg_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_INFO(cmd, info) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, info->qdid);\ -+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, info->max_senders);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, info->err_fqid);\ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, info->tx_conf_fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, sender_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, fqid) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPCON_INFO(cmd, info) \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->ch_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPBP_INFO(cmd, info) \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->bpid) -+ -+/* 
cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, priority);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPCI_FQID(cmd, fqid) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, marking);\ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->tc_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->flow_id);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->dpbp_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->marking);\ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\ -+} while (0) -+ -+#define DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 4, 4, enum dpdbg_dpio_trace_type, \ -+ trace_point[0].enqueue_type); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 36, 4, enum dpdbg_dpio_trace_type, \ -+ trace_point[1].enqueue_type); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, trace_cfg->tc_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, trace_cfg->flow_id);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, trace_cfg->dpbp_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ 
trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \ -+ trace_point[0].verbosity); \ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \ -+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \ -+ trace_point[1].verbosity); \ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id);\ -+ MC_CMD_OP(cmd, 0, 32, 16, enum dpmac_counter, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPMAC_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\ -+ MC_CMD_OP(cmd, 0, 32, 16, enum dpni_counter, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDBG_RSP_GET_DPNI_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPDBG_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei.h b/drivers/net/dpaa2/mc/fsl_dpdcei.h -new file mode 100644 -index 0000000..319795c ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdcei.h -@@ -0,0 +1,515 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDCEI_H -+#define __FSL_DPDCEI_H -+ -+/* Data Path DCE Interface API -+ * Contains initialization APIs and runtime control APIs for DPDCEI -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPDCEI macros */ -+ -+/** -+ * Indicates an invalid frame queue -+ */ -+#define DPDCEI_FQID_NOT_VALID (uint32_t)(-1) -+ -+/** -+ * enum dpdcei_engine - DCE engine block -+ * @DPDCEI_ENGINE_COMPRESSION: Engine compression -+ * @DPDCEI_ENGINE_DECOMPRESSION: Engine decompression -+ */ -+enum dpdcei_engine { -+ DPDCEI_ENGINE_COMPRESSION, -+ DPDCEI_ENGINE_DECOMPRESSION -+}; -+ -+/** -+ * dpdcei_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @dpdcei_id: DPDCEI unique ID -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdcei_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdcei_id, -+ uint16_t *token); -+ -+/** -+ * dpdcei_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdcei_cfg - Structure representing DPDCEI configuration -+ * @engine: compression or decompression engine to be selected -+ * @priority: Priority for the DCE hardware processing (valid values 1-8). -+ */ -+struct dpdcei_cfg { -+ enum dpdcei_engine engine; -+ uint8_t priority; -+}; -+ -+/** -+ * dpdcei_create() - Create the DPDCEI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @cfg: configuration parameters -+ * -+ * Create the DPDCEI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
For objects that are created using the -+ * DPL file, call dpdcei_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdcei_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdcei_destroy() - Destroy the DPDCEI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdcei_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_enable() - Enable the DPDCEI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_disable() - Disable the DPDCEI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdcei_is_enabled() - Check if the DPDCEI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @en: Return '1' for object enabled/'0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdcei_reset() - Reset the DPDCEI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdcei_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdcei_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdcei_set_irq() - Set IRQ information for the DPDCEI to trigger an interrupt -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdcei_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdcei_irq_cfg *irq_cfg); -+ -+/** -+ * dpdcei_get_irq() - Get IRQ information from the DPDCEI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdcei_irq_cfg *irq_cfg); -+ -+/** -+ * dpdcei_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdcei_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdcei_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdcei_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdcei_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdcei_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+/** -+ * struct dpdcei_attr - Structure representing DPDCEI attributes -+ * @id: DPDCEI object ID -+ * @engine: DCE engine block -+ * @version: DPDCEI version -+ */ -+struct dpdcei_attr { -+ int id; -+ enum dpdcei_engine engine; -+ /** -+ * struct version - DPDCEI version -+ * @major: DPDCEI major version -+ * @minor: DPDCEI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpdcei_get_attributes() - Retrieve DPDCEI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdcei_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_attr *attr); -+ -+/** -+ * enum dpdcei_dest - DPDCEI destination types -+ * @DPDCEI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; -+ * user is expected to dequeue from the queue based on -+ * polling or other user-defined method -+ * @DPDCEI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to -+ * dequeue from the queue only after notification is -+ * received -+ * @DPDCEI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified -+ * DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpdcei_dest { -+ DPDCEI_DEST_NONE = 0, -+ DPDCEI_DEST_DPIO = 1, -+ DPDCEI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpdcei_dest_cfg - Structure representing DPDCEI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPDCEI_DEST_NONE' option -+ */ -+struct dpdcei_dest_cfg { -+ enum dpdcei_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** DPDCEI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPDCEI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPDCEI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpdcei_rx_queue_cfg - RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPDCEI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPDCEI_QUEUE_OPT_USER_CTX' is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPDCEI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpdcei_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpdcei_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpdcei_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpdcei_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpdcei_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpdcei_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpdcei_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdcei_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_rx_queue_attr *attr); -+ -+/** -+ * struct dpdcei_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to DCE hardware -+ */ -+struct dpdcei_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdcei_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDCEI object -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdcei_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPDCEI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h -new file mode 100644 -index 0000000..8452d88 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h -@@ -0,0 +1,182 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPDCEI_CMD_H -+#define _FSL_DPDCEI_CMD_H -+ -+/* DPDCEI Version */ -+#define DPDCEI_VER_MAJOR 1 -+#define DPDCEI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPDCEI_CMDID_CLOSE 0x800 -+#define DPDCEI_CMDID_OPEN 0x80D -+#define DPDCEI_CMDID_CREATE 0x90D -+#define DPDCEI_CMDID_DESTROY 0x900 -+ -+#define DPDCEI_CMDID_ENABLE 0x002 -+#define DPDCEI_CMDID_DISABLE 0x003 -+#define DPDCEI_CMDID_GET_ATTR 0x004 -+#define DPDCEI_CMDID_RESET 0x005 -+#define DPDCEI_CMDID_IS_ENABLED 0x006 -+ -+#define DPDCEI_CMDID_SET_IRQ 0x010 -+#define DPDCEI_CMDID_GET_IRQ 0x011 -+#define DPDCEI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDCEI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDCEI_CMDID_SET_IRQ_MASK 0x014 -+#define DPDCEI_CMDID_GET_IRQ_MASK 0x015 -+#define DPDCEI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDCEI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDCEI_CMDID_SET_RX_QUEUE 0x1B0 -+#define DPDCEI_CMDID_GET_RX_QUEUE 0x1B1 -+#define DPDCEI_CMDID_GET_TX_QUEUE 0x1B2 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_OPEN(cmd, dpdcei_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdcei_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdcei_engine, cfg->engine);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priority);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, 
status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, enum dpdcei_engine, attr->engine); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdcei_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdcei_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDCEI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPDCEI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai.h b/drivers/net/dpaa2/mc/fsl_dpdmai.h -new file mode 100644 -index 0000000..e931ce1 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmai.h -@@ -0,0 +1,521 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDMAI_H -+#define __FSL_DPDMAI_H -+ -+struct fsl_mc_io; -+ -+/* Data Path DMA Interface API -+ * Contains initialization APIs and runtime control APIs for DPDMAI -+ */ -+ -+/* General DPDMAI macros */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPDMAI object -+ */ -+#define DPDMAI_PRIO_NUM 2 -+ -+/** -+ * All queues considered; see dpdmai_set_rx_queue() -+ */ -+#define DPDMAI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpdmai_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdmai_id: DPDMAI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdmai_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmai_id, -+ uint16_t *token); -+ -+/** -+ * dpdmai_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmai_cfg - Structure representing DPDMAI configuration -+ * @priorities: Priorities for the DMA hardware processing; valid priorities are -+ * configured with values 1-8; the entry following last valid entry -+ * should be configured with 0 -+ */ -+struct dpdmai_cfg { -+ uint8_t priorities[DPDMAI_PRIO_NUM]; -+}; -+ -+/** -+ * dpdmai_create() - Create the DPDMAI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPDMAI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpdmai_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmai_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmai_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmai_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmai_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_get_irq() - Get IRQ information from the DPDMAI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdmai_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdmai_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdmai_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdmai_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
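As a rough illustration of how the IRQ calls above fit together, the sketch below programs message interrupt index 0, unmasks all causes and enables the interrupt; a handler would then use the status calls declared immediately below to read and acknowledge pending causes. The message address/value pair is a platform-specific assumption (e.g. an MSI allocation), and cmd_flags of 0 is again assumed.

#include <stdint.h>
#include "fsl_dpdmai.h"

/* Illustrative setup of DPDMAI message interrupt index 0; msi_addr and
 * msi_data are placeholders for a platform-provided MSI allocation. */
static int dpdmai_irq_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				    uint64_t msi_addr, uint32_t msi_data)
{
	struct dpdmai_irq_cfg irq_cfg = {
		.addr = msi_addr,
		.val = msi_data,
		.irq_num = 0,
	};
	int err;

	err = dpdmai_set_irq(mc_io, 0, token, 0, &irq_cfg);
	if (err)
		return err;

	/* consider every cause, then enable the interrupt as a whole */
	err = dpdmai_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	return dpdmai_set_irq_enable(mc_io, 0, token, 0, 1);
}

/* In the handler: read pending causes, then acknowledge them (W1C). */
static void dpdmai_irq_ack_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;

	if (dpdmai_get_irq_status(mc_io, 0, token, 0, &status) == 0 && status)
		dpdmai_clear_irq_status(mc_io, 0, token, 0, status);
}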
-+ */ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmai_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmai_attr - Structure representing DPDMAI attributes -+ * @id: DPDMAI object ID -+ * @version: DPDMAI version -+ * @num_of_priorities: number of priorities -+ */ -+struct dpdmai_attr { -+ int id; -+ /** -+ * struct version - DPDMAI version -+ * @major: DPDMAI major version -+ * @minor: DPDMAI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmai_attr *attr); -+ -+/** -+ * enum dpdmai_dest - DPDMAI destination types -+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is expected to dequeue -+ * from the queue based on polling or other user-defined method -+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpdmai_dest { -+ DPDMAI_DEST_NONE = 0, -+ DPDMAI_DEST_DPIO = 1, -+ DPDMAI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPDMAI_DEST_NONE' option -+ */ -+struct dpdmai_dest_cfg { -+ enum dpdmai_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPDMAI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPDMAI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPDMAI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained 
in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpdmai_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ -+}; -+ -+/** -+ * dpdmai_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation; use -+ * DPDMAI_ALL_QUEUES to configure all Rx queues -+ * identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpdmai_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_rx_queue_attr *attr); -+ -+/** -+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware -+ */ -+ -+struct dpdmai_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPDMAI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h -new file mode 100644 -index 0000000..7c4a31a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h -@@ -0,0 +1,191 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
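Returning to the DPDMAI queue API above: before frames can be exchanged with the DMA hardware, the Rx queues are normally pointed at a notification destination and the virtual FQIDs are read back for use at the QBMan portal. A hedged sketch, assuming an existing DPIO object ID and an arbitrary user-context value:

#include <stdint.h>
#include "fsl_dpdmai.h"

/* Illustrative Rx/Tx queue wiring for a DPDMAI object; dpio_id is assumed
 * to identify an already-configured DPIO notification target. */
static int dpdmai_queue_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				      int dpio_id, uint32_t *rx_fqid,
				      uint32_t *tx_fqid)
{
	struct dpdmai_rx_queue_cfg rx_cfg = {
		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
		.user_ctx = 0x1234,               /* arbitrary example value */
		.dest_cfg = {
			.dest_type = DPDMAI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int err;

	/* configure all Rx queues identically */
	err = dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &rx_cfg);
	if (err)
		return err;

	/* read back the virtual FQIDs for priority 0 */
	err = dpdmai_get_rx_queue(mc_io, 0, token, 0, &rx_attr);
	if (err)
		return err;
	err = dpdmai_get_tx_queue(mc_io, 0, token, 0, &tx_attr);
	if (err)
		return err;

	*rx_fqid = rx_attr.fqid;
	*tx_fqid = tx_attr.fqid;
	return 0;
}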
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMAI_CMD_H -+#define _FSL_DPDMAI_CMD_H -+ -+/* DPDMAI Version */ -+#define DPDMAI_VER_MAJOR 2 -+#define DPDMAI_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPDMAI_CMDID_CLOSE 0x800 -+#define DPDMAI_CMDID_OPEN 0x80E -+#define DPDMAI_CMDID_CREATE 0x90E -+#define DPDMAI_CMDID_DESTROY 0x900 -+ -+#define DPDMAI_CMDID_ENABLE 0x002 -+#define DPDMAI_CMDID_DISABLE 0x003 -+#define DPDMAI_CMDID_GET_ATTR 0x004 -+#define DPDMAI_CMDID_RESET 0x005 -+#define DPDMAI_CMDID_IS_ENABLED 0x006 -+ -+#define DPDMAI_CMDID_SET_IRQ 0x010 -+#define DPDMAI_CMDID_GET_IRQ 0x011 -+#define DPDMAI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDMAI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDMAI_CMDID_SET_IRQ_MASK 0x014 -+#define DPDMAI_CMDID_GET_IRQ_MASK 0x015 -+#define DPDMAI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDMAI_CMDID_SET_RX_QUEUE 0x1A0 -+#define DPDMAI_CMDID_GET_RX_QUEUE 0x1A1 -+#define DPDMAI_CMDID_GET_TX_QUEUE 0x1A2 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 
32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPDMAI_CMD_H */ -diff --git 
a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h -new file mode 100644 -index 0000000..455a042 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h -@@ -0,0 +1,724 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPDMUX_H -+#define __FSL_DPDMUX_H -+ -+#include -+ -+struct fsl_mc_io; -+ -+/* Data Path Demux API -+ * Contains API for handling DPDMUX topology and functionality -+ */ -+ -+/** -+ * dpdmux_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpdmux_id: DPDMUX unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpdmux_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmux_id, -+ uint16_t *token); -+ -+/** -+ * dpdmux_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPDMUX general options -+ */ -+ -+/** -+ * Enable bridging between internal interfaces -+ */ -+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL -+ -+#define DPDMUX_IRQ_INDEX_IF 0x0000 -+#define DPDMUX_IRQ_INDEX 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * enum dpdmux_manip - DPDMUX manipulation operations -+ * @DPDMUX_MANIP_NONE: No manipulation on frames -+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress -+ */ -+enum dpdmux_manip { -+ DPDMUX_MANIP_NONE = 0x0, -+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1 -+}; -+ -+/** -+ * enum dpdmux_method - DPDMUX method options -+ * @DPDMUX_METHOD_NONE: no DPDMUX method -+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address -+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address -+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN -+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN -+ */ -+enum dpdmux_method { -+ DPDMUX_METHOD_NONE = 0x0, -+ DPDMUX_METHOD_C_VLAN_MAC = 0x1, -+ DPDMUX_METHOD_MAC = 0x2, -+ DPDMUX_METHOD_C_VLAN = 0x3, -+ DPDMUX_METHOD_S_VLAN = 0x4 -+}; -+ -+/** -+ * struct dpdmux_cfg - DPDMUX configuration parameters -+ * @method: Defines the operation method for the DPDMUX address table -+ * @manip: Required manipulation operation -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpdmux_cfg { -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_' flags -+ * @max_dmat_entries: Maximum entries in DPDMUX address table -+ * 0 - indicates default: 64 entries per interface. -+ * @max_mc_groups: Number of multicast groups in DPDMUX table -+ * 0 - indicates default: 32 multicast groups -+ * @max_vlan_ids: max vlan ids allowed in the system - -+ * relevant only case of working in mac+vlan method. -+ * 0 - indicates default 16 vlan ids. -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_dmat_entries; -+ uint16_t max_mc_groups; -+ uint16_t max_vlan_ids; -+ } adv; -+}; -+ -+/** -+ * dpdmux_create() - Create the DPDMUX object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPDMUX object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpdmux_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmux_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources. 
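The dpdmux_cfg structure above is what dpdmux_create() consumes. A minimal sketch, assuming a two-downlink demux classifying on C-VLAN plus MAC, all advanced parameters left at their defaults, and dpdmux_enable() (declared just below) called once creation succeeds:

#include <stdint.h>
#include "fsl_dpdmux.h"

/* Illustrative run-time creation of a DPDMUX with two downlink interfaces. */
static int dpdmux_create_example(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpdmux_cfg cfg = {
		.method = DPDMUX_METHOD_C_VLAN_MAC,
		.manip = DPDMUX_MANIP_NONE,
		.num_ifs = 2,             /* excluding the uplink interface */
		/* .adv left all-zero: default table sizes and options */
	};
	int err;

	err = dpdmux_create(mc_io, 0, &cfg, token);
	if (err)
		return err;

	return dpdmux_enable(mc_io, 0, *token);
}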
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpdmux_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_enable() - Enable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_disable() - Disable DPDMUX functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmux_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmux_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_get_irq() - Get IRQ information from the DPDMUX. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmux_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmux_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpdmux_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpdmux_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpdmux_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmux_attr - Structure representing DPDMUX attributes -+ * @id: DPDMUX object ID -+ * @version: DPDMUX version -+ * @options: Configuration options (bitmap) -+ * @method: DPDMUX address table method -+ * @manip: DPDMUX manipulation type -+ * @num_ifs: Number of interfaces (excluding the uplink interface) -+ * @mem_size: DPDMUX frame storage memory size -+ */ -+struct dpdmux_attr { -+ int id; -+ /** -+ * struct version - DPDMUX version -+ * @major: DPDMUX major version -+ * @minor: DPDMUX minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ enum dpdmux_method method; -+ enum dpdmux_manip manip; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+}; -+ -+/** -+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmux_attr *attr); -+ -+/** -+ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @max_frame_length: The required maximum frame length -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * enum dpdmux_counter_type - Counter types -+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames -+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes -+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames -+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames -+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes -+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ */ -+enum dpdmux_counter_type { -+ DPDMUX_CNT_ING_FRAME = 0x0, -+ DPDMUX_CNT_ING_BYTE = 0x1, -+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2, -+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, -+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4, -+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5, -+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6, -+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7, -+ DPDMUX_CNT_EGR_FRAME = 0x8, -+ DPDMUX_CNT_EGR_BYTE = 0x9, -+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * enum dpdmux_accepted_frames_type - DPDMUX frame types -+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority-tagged frames -+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * priority-tagged frames that are received on this -+ * interface -+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames -+ * received on this interface are accepted -+ */ -+enum dpdmux_accepted_frames_type { -+ DPDMUX_ADMIT_ALL = 0, -+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, -+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2 -+}; -+ -+/** -+ * enum dpdmux_action - DPDMUX action for un-accepted frames -+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames -+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the -+ * control interface -+ */ -+enum dpdmux_action { -+ DPDMUX_ACTION_DROP = 0, -+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 -+}; -+ -+/** -+ * struct dpdmux_accepted_frames - Frame types configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: Defines action on frames not accepted -+ */ -+struct dpdmux_accepted_frames { -+ enum dpdmux_accepted_frames_type type; -+ enum dpdmux_action unaccept_act; -+}; -+ -+/** -+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @cfg: Frame types configuration -+ * -+ * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or -+ * priority-tagged frames are discarded. -+ * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or -+ * priority-tagged frames are accepted. -+ * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, -+ * untagged and priority-tagged frame are accepted; -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_accepted_frames *cfg); -+ -+/** -+ * struct dpdmux_if_attr - Structure representing frame types configuration -+ * @rate: Configured interface rate (in bits per second) -+ * @enabled: Indicates if interface is enabled -+ * @accept_frame_type: Indicates type of accepted frames for the interface -+ */ -+struct dpdmux_if_attr { -+ uint32_t rate; -+ int enabled; -+ enum dpdmux_accepted_frames_type accept_frame_type; -+}; -+ -+/** -+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs); -+ * @attr: Interface attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_if_attr *attr); -+ -+/** -+ * struct dpdmux_l2_rule - Structure representing L2 rule -+ * @mac_addr: MAC address -+ * @vlan_id: VLAN ID -+ */ -+struct dpdmux_l2_rule { -+ uint8_t mac_addr[6]; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function removes a L2 rule from DPDMUX table -+ * or adds an interface to an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMUX object -+ * @if_id: Destination interface ID -+ * @rule: L2 rule -+ * -+ * Function adds a L2 rule into DPDMUX table -+ * or adds an interface to an existing multicast address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpdmux_l2_rule *rule); -+ -+/** -+* dpdmux_if_get_counter() - Functions obtains specific counter of an interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* @if_id: Interface Id -+* @counter_type: counter type -+* @counter: Returned specific counter information -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpdmux_counter_type counter_type, -+ uint64_t *counter); -+ -+/** -+* dpdmux_ul_reset_counters() - Function resets the uplink counter -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPDMUX object -+* -+* Return: '0' on Success; Error code otherwise. 
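Pulling the per-interface calls above together, the sketch below admits only VLAN-tagged traffic on downlink interface 1, pins an example MAC/VLAN pair to it, and reads the ingress frame counter. The MAC address and VLAN ID are arbitrary illustrations, not values taken from this patch.

#include <stdint.h>
#include "fsl_dpdmux.h"

/* Illustrative per-interface setup on an already-enabled DPDMUX;
 * interface 0 is the uplink, so downlink 1 is configured here. */
static int dpdmux_if_setup_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpdmux_accepted_frames frames = {
		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPDMUX_ACTION_DROP,
	};
	struct dpdmux_l2_rule rule = {
		.mac_addr = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 }, /* example */
		.vlan_id = 100,                                     /* example */
	};
	uint64_t frames_in = 0;
	int err;

	err = dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &frames);
	if (err)
		return err;

	err = dpdmux_if_add_l2_rule(mc_io, 0, token, 1, &rule);
	if (err)
		return err;

	return dpdmux_if_get_counter(mc_io, 0, token, 1,
				     DPDMUX_CNT_ING_FRAME, &frames_in);
}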
-+*/ -+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ */ -+struct dpdmux_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpdmux_if_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_cfg *cfg); -+/** -+ * struct dpdmux_link_state - Structure representing DPDMUX link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values -+ * @up: 0 - down, 1 - up -+ */ -+struct dpdmux_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpdmux_if_get_link_state - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @state: link state -+ * -+ * @returns '0' on Success; Error code otherwise. -+ */ -+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpdmux_link_state *state); -+ -+#endif /* __FSL_DPDMUX_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h -new file mode 100644 -index 0000000..0a5cf17 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h -@@ -0,0 +1,256 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMUX_CMD_H -+#define _FSL_DPDMUX_CMD_H -+ -+/* DPDMUX Version */ -+#define DPDMUX_VER_MAJOR 5 -+#define DPDMUX_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPDMUX_CMDID_CLOSE 0x800 -+#define DPDMUX_CMDID_OPEN 0x806 -+#define DPDMUX_CMDID_CREATE 0x906 -+#define DPDMUX_CMDID_DESTROY 0x900 -+ -+#define DPDMUX_CMDID_ENABLE 0x002 -+#define DPDMUX_CMDID_DISABLE 0x003 -+#define DPDMUX_CMDID_GET_ATTR 0x004 -+#define DPDMUX_CMDID_RESET 0x005 -+#define DPDMUX_CMDID_IS_ENABLED 0x006 -+ -+#define DPDMUX_CMDID_SET_IRQ 0x010 -+#define DPDMUX_CMDID_GET_IRQ 0x011 -+#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPDMUX_CMDID_SET_IRQ_MASK 0x014 -+#define DPDMUX_CMDID_GET_IRQ_MASK 0x015 -+#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016 -+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1 -+ -+#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3 -+ -+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7 -+#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8 -+ -+#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0 -+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1 -+#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2 -+#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3 -+#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\ -+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) \ -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+#define DPDMUX_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\ -+ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_unaccepted_frames_action, \ -+ cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \ -+ attr->accept_frame_type);\ -+ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, 
l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+#endif /* _FSL_DPDMUX_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpio.h b/drivers/net/dpaa2/mc/fsl_dpio.h -new file mode 100644 -index 0000000..88a492f ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpio.h -@@ -0,0 +1,460 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPIO_H -+#define __FSL_DPIO_H -+ -+/* Data Path I/O Portal API -+ * Contains initialization APIs and runtime control APIs for DPIO -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpio_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpio_id: DPIO unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpio_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token); -+ -+/** -+ * dpio_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpio_channel_mode - DPIO notification channel mode -+ * @DPIO_NO_CHANNEL: No support for notification channel -+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a -+ * dedicated channel in the DPIO; user should point the queue's -+ * destination in the relevant interface to this DPIO -+ */ -+enum dpio_channel_mode { -+ DPIO_NO_CHANNEL = 0, -+ DPIO_LOCAL_CHANNEL = 1, -+}; -+ -+/** -+ * struct dpio_cfg - Structure representing DPIO configuration -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ */ -+struct dpio_cfg { -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpio_create() - Create the DPIO object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPIO object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpio_open() function to get an authentication -+ * token first. 
-+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpio_destroy() - Destroy the DPIO object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_enable() - Enable the DPIO, allow I/O portal operations. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_is_enabled() - Check if the DPIO is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpio_reset() - Reset the DPIO, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_set_stashing_destination() - Set the stashing destination. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest); -+ -+/** -+ * dpio_get_stashing_destination() - Get the stashing destination.. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: Returns the stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest); -+ -+/** -+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * @channel_index: Returned channel index to be used in qbman API -+ * -+ * Return: '0' on Success; Error code otherwise. 
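For orientation, the DPIO control calls above compose into a simple bring-up/tear-down sequence. A minimal sketch follows (C, not part of the patch): it assumes an fsl_mc_io portal handle and a DPIO object ID obtained from the MC bus layer, passes cmd_flags of 0 throughout, and uses a placeholder stashing destination.

static int dpio_bringup_example(struct fsl_mc_io *mc_io, int dpio_id)
{
	uint16_t token;
	int err;

	/* open a control session on an already-created DPIO object */
	err = dpio_open(mc_io, 0 /* cmd_flags */, dpio_id, &token);
	if (err)
		return err;

	/* stashing destination 0 is a placeholder value */
	err = dpio_set_stashing_destination(mc_io, 0, token, 0);
	if (!err)
		err = dpio_enable(mc_io, 0, token);

	/* ... the software portal is usable here ... */

	dpio_disable(mc_io, 0, token);
	dpio_close(mc_io, 0, token);
	return err;
}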
-+ */ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index); -+ -+/** -+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id); -+ -+/** -+ * DPIO IRQ Index and Events -+ */ -+ -+/** -+ * Irq software-portal index -+ */ -+#define DPIO_IRQ_SWP_INDEX 0 -+ -+/** -+ * struct dpio_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpio_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_get_irq() - Get IRQ information from the DPIO. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpio_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpio_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpio_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpio_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpio_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
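The IRQ-related DPIO calls follow the same pattern; a hedged sketch (not part of the patch) of arming the software-portal interrupt is shown below. The MSI address/value pair and the all-ones cause mask are placeholders that a real driver would obtain from its interrupt layer.

static int dpio_irq_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				  uint64_t msi_addr, uint32_t msi_val)
{
	struct dpio_irq_cfg irq_cfg = {
		.addr = msi_addr,
		.val = msi_val,
		.irq_num = 0,	/* user-defined cookie */
	};
	uint32_t status = 0;
	int err;

	err = dpio_set_irq(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, &irq_cfg);
	if (err)
		return err;

	/* unmask all causes, then enable the interrupt as a whole */
	err = dpio_set_irq_mask(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 0xffffffff);
	if (!err)
		err = dpio_set_irq_enable(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, 1);

	/* later, from the handler: read and acknowledge pending causes */
	if (!err && !dpio_get_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX,
					 &status))
		dpio_clear_irq_status(mc_io, 0, token, DPIO_IRQ_SWP_INDEX,
				      status);

	return err;
}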
-+ */ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpio_attr - Structure representing DPIO attributes -+ * @id: DPIO object ID -+ * @version: DPIO version -+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area -+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area -+ * @qbman_portal_id: Software portal ID -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ * @qbman_version: QBMAN version -+ */ -+struct dpio_attr { -+ int id; -+ /** -+ * struct version - DPIO version -+ * @major: DPIO major version -+ * @minor: DPIO minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t qbman_portal_ce_offset; -+ uint64_t qbman_portal_ci_offset; -+ uint16_t qbman_portal_id; -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+ uint32_t qbman_version; -+}; -+ -+/** -+ * dpio_get_attributes() - Retrieve DPIO attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr); -+#endif /* __FSL_DPIO_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpio_cmd.h b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h -new file mode 100644 -index 0000000..f339cd6 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h -@@ -0,0 +1,184 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPIO_CMD_H -+#define _FSL_DPIO_CMD_H -+ -+/* DPIO Version */ -+#define DPIO_VER_MAJOR 3 -+#define DPIO_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPIO_CMDID_CLOSE 0x800 -+#define DPIO_CMDID_OPEN 0x803 -+#define DPIO_CMDID_CREATE 0x903 -+#define DPIO_CMDID_DESTROY 0x900 -+ -+#define DPIO_CMDID_ENABLE 0x002 -+#define DPIO_CMDID_DISABLE 0x003 -+#define DPIO_CMDID_GET_ATTR 0x004 -+#define DPIO_CMDID_RESET 0x005 -+#define DPIO_CMDID_IS_ENABLED 0x006 -+ -+#define DPIO_CMDID_SET_IRQ 0x010 -+#define DPIO_CMDID_GET_IRQ 0x011 -+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPIO_CMDID_SET_IRQ_MASK 0x014 -+#define DPIO_CMDID_GET_IRQ_MASK 0x015 -+#define DPIO_CMDID_GET_IRQ_STATUS 0x016 -+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPIO_CMDID_SET_STASHING_DEST 0x120 -+#define DPIO_CMDID_GET_STASHING_DEST 0x121 -+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 -+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_OPEN(cmd, dpio_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ -+ cfg->channel_mode);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ -+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+#endif /* _FSL_DPIO_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h -new file mode 100644 -index 0000000..b2bceaf ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h -@@ -0,0 +1,174 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
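The DPIO_CMD_*/DPIO_RSP_* macros above all expand to MC_CMD_OP/MC_RSP_OP, whose parameters are documented as (cmd, param, offset, width, type, arg_name): a field of 'width' bits is written to, or read from, bit 'offset' of the 64-bit command parameter word 'param'. MC_CMD_OP itself is defined elsewhere in the MC driver and is not part of this hunk; the helper below is only an illustrative re-implementation of that packing idea, not the real macro.

#include <stdint.h>

/* illustration only: place an argument of 'width' bits at bit 'offset'
 * of a 64-bit parameter word
 */
static inline void mc_pack_example(uint64_t *param, unsigned int offset,
				   unsigned int width, uint64_t arg)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	*param &= ~(mask << offset);		/* clear the field */
	*param |= (arg & mask) << offset;	/* write the argument */
}

/* e.g. DPIO_CMD_GET_IRQ(cmd, irq_index) places an 8-bit irq_index at
 * bit offset 32 of parameter word 0, i.e. roughly:
 *
 *	mc_pack_example(&cmd_params[0], 32, 8, irq_index);
 */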
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPKG_H_ -+#define __FSL_DPKG_H_ -+ -+#include -+ -+/* Data Path Key Generator API -+ * Contains initialization APIs and runtime APIs for the Key Generator -+ */ -+ -+/** Key Generator properties */ -+ -+/** -+ * Number of masks per key extraction -+ */ -+#define DPKG_NUM_OF_MASKS 4 -+/** -+ * Number of extractions per key profile -+ */ -+#define DPKG_MAX_NUM_OF_EXTRACTS 10 -+ -+/** -+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types -+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset -+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field -+ * @DPKG_FULL_FIELD: Extract a full field -+ */ -+enum dpkg_extract_from_hdr_type { -+ DPKG_FROM_HDR = 0, -+ DPKG_FROM_FIELD = 1, -+ DPKG_FULL_FIELD = 2 -+}; -+ -+/** -+ * enum dpkg_extract_type - Enumeration for selecting extraction type -+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header -+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header -+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; -+ * e.g. 
can be used to extract header existence; -+ * please refer to 'Parse Result definition' section in the parser BG -+ */ -+enum dpkg_extract_type { -+ DPKG_EXTRACT_FROM_HDR = 0, -+ DPKG_EXTRACT_FROM_DATA = 1, -+ DPKG_EXTRACT_FROM_PARSE = 3 -+}; -+ -+/** -+ * struct dpkg_mask - A structure for defining a single extraction mask -+ * @mask: Byte mask for the extracted content -+ * @offset: Offset within the extracted content -+ */ -+struct dpkg_mask { -+ uint8_t mask; -+ uint8_t offset; -+}; -+ -+/** -+ * struct dpkg_extract - A structure for defining a single extraction -+ * @type: Determines how the union below is interpreted: -+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; -+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; -+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' -+ * @extract: Selects extraction method -+ * @num_of_byte_masks: Defines the number of valid entries in the array below; -+ * This is also the number of bytes to be used as masks -+ * @masks: Masks parameters -+ */ -+struct dpkg_extract { -+ enum dpkg_extract_type type; -+ /** -+ * union extract - Selects extraction method -+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ */ -+ union { -+ /** -+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @prot: Any of the supported headers -+ * @type: Defines the type of header extraction: -+ * DPKG_FROM_HDR: use size & offset below; -+ * DPKG_FROM_FIELD: use field, size and offset below; -+ * DPKG_FULL_FIELD: use field below -+ * @field: One of the supported fields (NH_FLD_) -+ * -+ * @size: Size in bytes -+ * @offset: Byte offset -+ * @hdr_index: Clear for cases not listed below; -+ * Used for protocols that may have more than a single -+ * header, 0 indicates an outer header; -+ * Supported protocols (possible values): -+ * NET_PROT_VLAN (0, HDR_INDEX_LAST); -+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); -+ * NET_PROT_IP(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv4(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv6(0, HDR_INDEX_LAST); -+ */ -+ -+ struct { -+ enum net_prot prot; -+ enum dpkg_extract_from_hdr_type type; -+ uint32_t field; -+ uint8_t size; -+ uint8_t offset; -+ uint8_t hdr_index; -+ } from_hdr; -+ /** -+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_data; -+ -+ /** -+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_parse; -+ } extract; -+ -+ uint8_t num_of_byte_masks; -+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; -+}; -+ -+/** -+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation -+ * profile (rule) -+ * @num_extracts: Defines the number of valid entries in the array below -+ * @extracts: Array of required extractions -+ */ -+struct dpkg_profile_cfg { -+ uint8_t num_extracts; -+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; -+}; -+ -+#endif /* __FSL_DPKG_H_ */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmac.h b/drivers/net/dpaa2/mc/fsl_dpmac.h -new file mode 100644 -index 0000000..ad27772 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmac.h -@@ -0,0 +1,593 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
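The dpkg structures above are filled in by callers to describe how a lookup key is built. A minimal sketch (not part of the patch): a profile with one extraction taking the full IP source field. NET_PROT_IP and the NH_FLD_IP_SRC field identifier are assumed to come from the net header this file includes; they are not defined in this hunk.

static struct dpkg_profile_cfg dpkg_profile_example(void)
{
	struct dpkg_profile_cfg kg_cfg = { 0 };

	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;    /* assumed */
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC; /* assumed */
	kg_cfg.extracts[0].num_of_byte_masks = 0;	/* no masking */

	return kg_cfg;
}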
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. -+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. 
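For completeness, a hedged sketch (not part of the patch) of the create/destroy path documented above, for a DPMAC that is not declared in the DPL; the MAC ID of 1 is a placeholder.

static int dpmac_create_example(struct fsl_mc_io *mc_io)
{
	struct dpmac_cfg cfg = { .mac_id = 1 };	/* placeholder hardware MAC ID */
	uint16_t token;
	int err;

	err = dpmac_create(mc_io, 0, &cfg, &token);
	if (err)
		return err;

	/* ... configure and use the object through 'token' ... */

	return dpmac_destroy(mc_io, 0, token);
}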
-+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPMAC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPMAC_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * struct dpmac_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmac_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
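A short sketch (not part of the patch) of the query path just described: open an existing DPMAC, read its attributes to discover the maximum supported rate, and close the control session again.

static int dpmac_attr_example(struct fsl_mc_io *mc_io, int dpmac_id,
			      uint32_t *max_rate)
{
	struct dpmac_attr attr;
	uint16_t token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	err = dpmac_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		*max_rate = attr.max_rate;	/* in Mbps */

	dpmac_close(mc_io, 0, token);
	return err;
}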
-+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * DPMAC link configuration/state options -+ */ -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpmac_counter - DPMAC counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. 
-+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also frames truncated as a result of -+ * the receive FIFO overflow. -+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error -+ * (optional used for wrong SFD). -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that was less than 64 -+ * bytes long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC) -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid -+ * frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received. -+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error -+ * (except for undersized/fragment frame). -+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including -+ * pause frames. -+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including -+ * pause frames. 
-+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME, -+ DPMAC_CNT_ENG_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. -+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h -new file mode 100644 -index 0000000..dc00590 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h -@@ -0,0 +1,195 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
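The runtime calls declared in fsl_dpmac.h above compose roughly as in the sketch below (not part of the patch): answer a link-configuration request by reading back the requested configuration, report the link as up with autonegotiation, and sample an ingress counter. The rate/options values are simply echoed and stand in for real PHY state.

static int dpmac_link_example(struct fsl_mc_io *mc_io, uint16_t token,
			      uint64_t *rx_frames)
{
	struct dpmac_link_cfg cfg;
	struct dpmac_link_state state;
	int err;

	err = dpmac_get_link_cfg(mc_io, 0, token, &cfg);
	if (err)
		return err;

	state.rate = cfg.rate;		/* in Mbps */
	state.options = cfg.options | DPMAC_LINK_OPT_AUTONEG;
	state.up = 1;			/* placeholder: assume the PHY is up */
	err = dpmac_set_link_state(mc_io, 0, token, &state);
	if (err)
		return err;

	return dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_ALL_FRAME,
				 rx_frames);
}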
-+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) 
\ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp.h b/drivers/net/dpaa2/mc/fsl_dpmcp.h -new file mode 100644 -index 0000000..80f238e ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmcp.h -@@ -0,0 +1,332 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMCP_H -+#define __FSL_DPMCP_H -+ -+/* Data Path Management Command Portal API -+ * Contains initialization APIs and runtime control APIs for DPMCP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmcp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmcp_id: DPMCP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmcp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token); -+ -+/** -+ * Get portal ID from pool -+ */ -+#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) -+ -+/** -+ * dpmcp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpmcp_cfg - Structure representing DPMCP configuration -+ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID -+ * from pool -+ */ -+struct dpmcp_cfg { -+ int portal_id; -+}; -+ -+/** -+ * dpmcp_create() - Create the DPMCP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMCP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmcp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * IRQ -+ */ -+ -+/** -+ * IRQ Index -+ */ -+#define DPMCP_IRQ_INDEX 0 -+/** -+ * irq event - Indicates that the link state changed -+ */ -+#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 -+ -+/** -+ * struct dpmcp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmcp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_get_irq() - Get IRQ information from the DPMCP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmcp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmcp_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmcp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * struct dpmcp_attr - Structure representing DPMCP attributes -+ * @id: DPMCP object ID -+ * @version: DPMCP version -+ */ -+struct dpmcp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPMCP version -+ * @major: DPMCP major version -+ * @minor: DPMCP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmcp_get_attributes - Retrieve DPMCP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
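dpmcp_get_attributes(), described above, is a convenient way to confirm that a token is valid and to report the object ID and API version. A short sketch under the same assumptions (portal, token and includes) as the earlier DPMCP example:

static int dpmcp_show_attr_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpmcp_attr attr;
        int err;

        err = dpmcp_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        printf("dpmcp.%d: API version %d.%d\n",
               attr.id, attr.version.major, attr.version.minor);
        return 0;
}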
-+ */ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr); -+ -+#endif /* __FSL_DPMCP_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h -new file mode 100644 -index 0000000..8f710bd ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h -@@ -0,0 +1,135 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPMCP_CMD_H -+#define _FSL_DPMCP_CMD_H -+ -+/* DPMCP Version */ -+#define DPMCP_VER_MAJOR 3 -+#define DPMCP_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMCP_CMDID_CLOSE 0x800 -+#define DPMCP_CMDID_OPEN 0x80b -+#define DPMCP_CMDID_CREATE 0x90b -+#define DPMCP_CMDID_DESTROY 0x900 -+ -+#define DPMCP_CMDID_GET_ATTR 0x004 -+#define DPMCP_CMDID_RESET 0x005 -+ -+#define DPMCP_CMDID_SET_IRQ 0x010 -+#define DPMCP_CMDID_GET_IRQ 0x011 -+#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMCP_CMDID_SET_IRQ_MASK 0x014 -+#define DPMCP_CMDID_GET_IRQ_MASK 0x015 -+#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_OPEN(cmd, dpmcp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmcp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->portal_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMCP_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+#endif /* _FSL_DPMCP_CMD_H */ -diff --git 
a/drivers/net/dpaa2/mc/fsl_dpmng.h b/drivers/net/dpaa2/mc/fsl_dpmng.h -new file mode 100644 -index 0000000..4468dea ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmng.h -@@ -0,0 +1,74 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_H -+#define __FSL_DPMNG_H -+ -+/* Management Complex General API -+ * Contains general API for the Management Complex firmware -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Management Complex firmware version information -+ */ -+#define MC_VER_MAJOR 9 -+#define MC_VER_MINOR 0 -+ -+/** -+ * struct mc_version -+ * @major: Major version number: incremented on API compatibility changes -+ * @minor: Minor version number: incremented on API additions (that are -+ * backward compatible); reset when major version is incremented -+ * @revision: Internal revision number: incremented on implementation changes -+ * and/or bug fixes that have no impact on API -+ */ -+struct mc_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * mc_get_version() - Retrieves the Management Complex firmware -+ * version information -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_ver_info: Returned version information structure -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info); -+ -+#endif /* __FSL_DPMNG_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h -new file mode 100644 -index 0000000..c34ca3a ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h -@@ -0,0 +1,46 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc.
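mc_get_version(), declared in fsl_dpmng.h above, is typically the first command issued through a new portal, both as a sanity check and to compare the firmware against MC_VER_MAJOR/MC_VER_MINOR. A hedged sketch, again assuming an initialized portal and cmd_flags of 0; the version policy shown is only one plausible choice:

#include <stdint.h>
#include <stdio.h>
#include "fsl_dpmng.h"        /* assumed include path */

static int mc_version_demo(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int err;

        err = mc_get_version(mc_io, 0, &ver);
        if (err)
                return err;

        printf("MC firmware %u.%u (revision %u)\n",
               (unsigned int)ver.major, (unsigned int)ver.minor,
               (unsigned int)ver.revision);

        /* One plausible policy: require the major version this API targets */
        return (ver.major == MC_VER_MAJOR) ? 0 : -1;
}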
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_CMD_H -+#define __FSL_DPMNG_CMD_H -+ -+/* Command IDs */ -+#define DPMNG_CMDID_GET_VERSION 0x831 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \ -+} while (0) -+ -+#endif /* __FSL_DPMNG_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h -new file mode 100644 -index 0000000..c820086 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpni.h -@@ -0,0 +1,2581 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPNI_H -+#define __FSL_DPNI_H -+ -+#include -+ -+struct fsl_mc_io; -+ -+/** -+ * Data Path Network Interface API -+ * Contains initialization APIs and runtime control APIs for DPNI -+ */ -+ -+/** General DPNI macros */ -+ -+/** -+ * Maximum number of traffic classes -+ */ -+#define DPNI_MAX_TC 8 -+/** -+ * Maximum number of buffer pools per DPNI -+ */ -+#define DPNI_MAX_DPBP 8 -+/** -+ * Maximum number of storage-profiles per DPNI -+ */ -+#define DPNI_MAX_SP 2 -+ -+/** -+ * All traffic classes considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) -+/** -+ * Generate new flow ID; see dpni_set_tx_flow() -+ */ -+#define DPNI_NEW_FLOW_ID (uint16_t)(-1) -+/* use for common tx-conf queue; see dpni_set_tx_conf_() */ -+#define DPNI_COMMON_TX_CONF (uint16_t)(-1) -+ -+/** -+ * dpni_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpni_id: DPNI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpni_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token); -+ -+/** -+ * dpni_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* DPNI configuration options */ -+ -+/** -+ * Allow different distribution key profiles for different traffic classes; -+ * if not set, a single key profile is assumed -+ */ -+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 -+ -+/** -+ * Disable all non-error transmit confirmation; error frames are reported -+ * back to a common Tx error queue -+ */ -+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 -+ -+/** -+ * Disable per-sender private Tx confirmation/error queue -+ */ -+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 -+ -+/** -+ * Support distribution based on hashed key; -+ * allows statistical distribution over receive queues in a traffic class -+ */ -+#define DPNI_OPT_DIST_HASH 0x00000010 -+ -+/** -+ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are -+ * '0', then backward compatibility is preserved; -+ * Support distribution based on flow steering; -+ * allows explicit control of distribution over receive queues in a traffic -+ * class -+ */ -+#define DPNI_OPT_DIST_FS 0x00000020 -+ -+/** -+ * Unicast filtering support -+ */ -+#define DPNI_OPT_UNICAST_FILTER 0x00000080 -+/** -+ * Multicast filtering support -+ */ -+#define DPNI_OPT_MULTICAST_FILTER 0x00000100 -+/** -+ * VLAN filtering support -+ */ -+#define DPNI_OPT_VLAN_FILTER 0x00000200 -+/** -+ * Support IP reassembly on received packets -+ */ -+#define DPNI_OPT_IPR 0x00000800 -+/** -+ * Support IP fragmentation on transmitted packets -+ */ -+#define DPNI_OPT_IPF 0x00001000 -+/** -+ * VLAN manipulation support -+ */ -+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 -+/** -+ * Support masking of QoS lookup keys -+ */ -+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 -+/** -+ * Support masking of Flow Steering lookup keys -+ */ -+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 -+ -+/** -+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration -+ * @tc_cfg: TCs configuration -+ * @ipr_cfg: IP reassembly configuration -+ */ -+struct dpni_extended_cfg { -+ /** -+ * struct tc_cfg - TC configuration -+ * @max_dist: Maximum distribution size for Rx traffic class; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024; -+ * value '0' will be treated as '1'. -+ * Other unsupported values will be rounded down to the nearest -+ * supported value.
-+ * @max_fs_entries: Maximum FS entries for Rx traffic class; -+ * '0' means no support for this TC; -+ */ -+ struct { -+ uint16_t max_dist; -+ uint16_t max_fs_entries; -+ } tc_cfg[DPNI_MAX_TC]; -+ /** -+ * struct ipr_cfg - Structure representing IP reassembly configuration -+ * @max_reass_frm_size: Maximum size of the reassembled frame -+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments -+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments -+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly -+ * process -+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly -+ * process -+ */ -+ struct { -+ uint16_t max_reass_frm_size; -+ uint16_t min_frag_size_ipv4; -+ uint16_t min_frag_size_ipv6; -+ uint16_t max_open_frames_ipv4; -+ uint16_t max_open_frames_ipv6; -+ } ipr_cfg; -+}; -+ -+/** -+ * dpni_prepare_extended_cfg() - function prepare extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_create() -+ */ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf); -+ -+/** -+ * struct dpni_cfg - Structure representing DPNI configuration -+ * @mac_addr: Primary MAC address -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpni_cfg { -+ uint8_t mac_addr[6]; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Mask of available options; use 'DPNI_OPT_' values -+ * @start_hdr: Selects the packet starting header for parsing; -+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); -+ * '0' will e treated as '1' -+ * @max_unicast_filters: Maximum number of unicast filters; -+ * '0' is treated as '16' -+ * @max_multicast_filters: Maximum number of multicast filters; -+ * '0' is treated as '64' -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in -+ * the QoS table; '0' is treated as '64' -+ * @max_qos_key_size: Maximum key size for the QoS look-up; -+ * '0' is treated as '24' which is enough for IPv4 -+ * 5-tuple -+ * @max_dist_key_size: Maximum key size for the distribution; -+ * '0' is treated as '24' which is enough for IPv4 5-tuple -+ * @max_policers: Maximum number of policers; -+ * should be between '0' and max_tcs -+ * @max_congestion_ctrl: Maximum number of congestion control groups -+ * (CGs); covers early drop and congestion notification -+ * requirements; -+ * should be between '0' and ('max_tcs' + 'max_senders') -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory -+ * filled with the extended configuration by calling -+ * dpni_prepare_extended_cfg() -+ */ -+ struct { -+ uint32_t options; -+ enum net_prot start_hdr; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+ } adv; -+}; -+ -+/** -+ * dpni_create() - Create the DPNI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one 
or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPNI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpni_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpni_destroy() - Destroy the DPNI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpni_pools_cfg - Structure representing buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpni_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPNI_MAX_DPBP]; -+}; -+ -+/** -+ * dpni_set_pools() - Set buffer pools configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Buffer pools configuration -+ * -+ * mandatory for DPNI operation -+ * warning:Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg); -+ -+/** -+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_is_enabled() - Check if the DPNI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_reset() - Reset the DPNI, returns the object to initial state. 
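dpni_set_pools(), documented above, is described as mandatory and only allowed while the DPNI is disabled, so a typical bring-up attaches the buffer pools first and then calls dpni_enable(). A sketch under that assumption; the DPBP object ID and buffer size are placeholders supplied by the caller, and the portal/token/cmd_flags assumptions match the earlier sketches:

#include <stdint.h>
#include "fsl_dpni.h"        /* assumed include path */

static int dpni_bringup_demo(struct fsl_mc_io *mc_io, uint16_t token,
                             int dpbp_id, uint16_t buf_size)
{
        struct dpni_pools_cfg pools = {
                .num_dpbp = 1,
                .pools[0] = {
                        .dpbp_id = dpbp_id,        /* placeholder DPBP object ID */
                        .buffer_size = buf_size,
                        .backup_pool = 0,
                },
        };
        int err;

        /* Buffer pools may only be attached while the DPNI is disabled */
        err = dpni_set_pools(mc_io, 0, token, &pools);
        if (err)
                return err;

        return dpni_enable(mc_io, 0, token);
}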
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPNI IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPNI_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+ -+/** -+ * struct dpni_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpni_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_get_irq() - Get IRQ information from the DPNI. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state: - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpni_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpni_set_irq_mask() - Set interrupt mask. 
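The DPNI IRQ calls above are normally used together: program the message-based interrupt target, unmask only DPNI_IRQ_EVENT_LINK_CHANGED with dpni_set_irq_mask() (declared just below), then enable the interrupt as a whole. A sketch with placeholder MSI address/value/number; real values come from the platform's interrupt allocation, which these headers do not cover:

static int dpni_link_irq_setup_demo(struct fsl_mc_io *mc_io, uint16_t token,
                                    uint64_t msi_addr, uint32_t msi_val,
                                    int irq_num)
{
        struct dpni_irq_cfg irq_cfg = {
                .addr = msi_addr,        /* MSI doorbell address (placeholder) */
                .val = msi_val,          /* value written to that address */
                .irq_num = irq_num,      /* user-defined IRQ number */
        };
        int err;

        err = dpni_set_irq(mc_io, 0, token, DPNI_IRQ_INDEX, &irq_cfg);
        if (err)
                return err;

        /* Only link-state changes should assert this IRQ */
        err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
                                DPNI_IRQ_EVENT_LINK_CHANGED);
        if (err)
                return err;

        return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}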
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpni_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpni_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpni_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
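When the interrupt fires, the pending causes are read with dpni_get_irq_status() and acknowledged with the write-1-to-clear dpni_clear_irq_status() described above. A minimal service-routine sketch under the same assumptions as the previous example:

static int dpni_link_irq_service_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;
        int err;

        err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
        if (err)
                return err;

        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                /* ... re-read the link state and notify the application ... */
        }

        /* Acknowledge: status bits are write-1-to-clear */
        return dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, status);
}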
-+ */ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpni_attr - Structure representing DPNI attributes -+ * @id: DPNI object ID -+ * @version: DPNI version -+ * @start_hdr: Indicates the packet starting header for parsing -+ * @options: Mask of available options; reflects the value as was given in -+ * object's creation -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) -+ * @max_unicast_filters: Maximum number of unicast filters -+ * @max_multicast_filters: Maximum number of multicast filters -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table -+ * @max_qos_key_size: Maximum key size for the QoS look-up -+ * @max_dist_key_size: Maximum key size for the distribution look-up -+ * @max_policers: Maximum number of policers; -+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; -+ * call dpni_extract_extended_cfg() to extract the extended configuration -+ */ -+struct dpni_attr { -+ int id; -+ /** -+ * struct version - DPNI version -+ * @major: DPNI major version -+ * @minor: DPNI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ enum net_prot start_hdr; -+ uint32_t options; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+}; -+ -+/** -+ * dpni_get_attributes() - Retrieve DPNI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
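dpni_get_attributes(), documented above, reports the object ID, the API version and the creation-time limits. A short sketch that simply prints a few of them (same portal, token and include assumptions as the earlier sketches):

static int dpni_show_attr_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_attr attr;
        int err;

        err = dpni_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        printf("dpni.%d: API %d.%d, max_tcs %d, max_senders %d\n",
               attr.id, attr.version.major, attr.version.minor,
               attr.max_tcs, attr.max_senders);
        return 0;
}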
-+ */ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr); -+ -+/** -+ * dpni_extract_extended_cfg() - extract the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: 256 bytes of DMA-able memory -+ * -+ * This function has to be called after dpni_get_attributes() -+ */ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf); -+ -+/** -+ * DPNI errors -+ */ -+ -+/** -+ * Extract out of frame header error -+ */ -+#define DPNI_ERROR_EOFHE 0x00020000 -+/** -+ * Frame length error -+ */ -+#define DPNI_ERROR_FLE 0x00002000 -+/** -+ * Frame physical error -+ */ -+#define DPNI_ERROR_FPE 0x00001000 -+/** -+ * Parsing header error -+ */ -+#define DPNI_ERROR_PHE 0x00000020 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L3CE 0x00000004 -+/** -+ * Parser L4 checksum error -+ */ -+#define DPNI_ERROR_L4CE 0x00000001 -+ -+/** -+ * enum dpni_error_action - Defines DPNI behavior for errors -+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame -+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow -+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue -+ */ -+enum dpni_error_action { -+ DPNI_ERROR_ACTION_DISCARD = 0, -+ DPNI_ERROR_ACTION_CONTINUE = 1, -+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 -+}; -+ -+/** -+ * struct dpni_error_cfg - Structure representing DPNI errors treatment -+ * @errors: Errors mask; use 'DPNI_ERROR_' values -+ * @error_action: The desired action for the errors mask -+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation -+ * status (FAS); relevant only for the non-discard action -+ */ -+struct dpni_error_cfg { -+ uint32_t errors; -+ enum dpni_error_action error_action; -+ int set_frame_annotation; -+}; -+ -+/** -+ * dpni_set_errors_behavior() - Set errors behavior -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Errors configuration -+ * -+ * This function may be called numerous times with different -+ * error masks -+ * -+ * Return: '0' on Success; Error code otherwise.
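A small example of the error-handling controls above: mark L3/L4 checksum failures in the frame annotation status but keep delivering the frames. This is only one plausible policy, not a required one, and it reuses the assumptions of the earlier sketches:

static int dpni_errors_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_error_cfg err_cfg = {
                /* Checksum failures on L3 and L4 ... */
                .errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
                /* ... are still delivered to the Rx queues ... */
                .error_action = DPNI_ERROR_ACTION_CONTINUE,
                /* ... but flagged in the frame annotation status (FAS) */
                .set_frame_annotation = 1,
        };

        return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
}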
-+ */ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg); -+ -+/** -+ * DPNI buffer layout modification options -+ */ -+ -+/** -+ * Select to modify the time-stamp setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 -+/** -+ * Select to modify the parser-result setting; not applicable for Tx -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 -+/** -+ * Select to modify the frame-status setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 -+/** -+ * Select to modify the private-data-size setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 -+/** -+ * Select to modify the data-alignment setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 -+/** -+ * Select to modify the data-head-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 -+/** -+ * Select to modify the data-tail-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 -+ -+/** -+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout -+ * @options: Flags representing the suggested modifications to the buffer -+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags -+ * @pass_timestamp: Pass timestamp value -+ * @pass_parser_result: Pass parser results -+ * @pass_frame_status: Pass frame status -+ * @private_data_size: Size kept for private data (in bytes) -+ * @data_align: Data alignment -+ * @data_head_room: Data head room -+ * @data_tail_room: Data tail room -+ */ -+struct dpni_buffer_layout { -+ uint32_t options; -+ int pass_timestamp; -+ int pass_parser_result; -+ int pass_frame_status; -+ uint16_t private_data_size; -+ uint16_t data_align; -+ uint16_t data_head_room; -+ uint16_t data_tail_room; -+}; -+ -+/** -+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. 
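The buffer-layout options above are applied per direction. A sketch that requests frame status, parser results and extra headroom on the Rx side; the 128-byte headroom is a placeholder, and the call is allowed only while the DPNI is disabled:

static int dpni_rx_layout_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_buffer_layout layout = {
                .options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                           DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                           DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM,
                .pass_frame_status = 1,
                .pass_parser_result = 1,
                .data_head_room = 128,        /* placeholder headroom, in bytes */
        };

        /* Layout changes are allowed only while the DPNI is disabled */
        return dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
}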
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout -+ * attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout -+ * configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
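Checksum validation is enabled independently for L3 and L4 with the calls above; a two-call sketch under the same assumptions as before:

static int dpni_csum_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        err = dpni_set_l3_chksum_validation(mc_io, 0, token, 1);
        if (err)
                return err;

        return dpni_set_l4_chksum_validation(mc_io, 0, token, 1);
}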
-+ */ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used -+ * for enqueue operations -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @qdid: Returned virtual QDID value that should be used as an argument -+ * in all enqueue operations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid); -+ -+/** -+ * struct dpni_sp_info - Structure representing DPNI storage-profile information -+ * (relevant only for DPNI owned by AIOP) -+ * @spids: array of storage-profiles -+ */ -+struct dpni_sp_info { -+ uint16_t spids[DPNI_MAX_SP]; -+}; -+ -+/** -+ * dpni_get_spids() - Get the AIOP storage profile IDs associated with the DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @sp_info: Returned AIOP storage-profile information -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only relevant for DPNI that belongs to AIOP container. -+ */ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info); -+ -+/** -+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @data_offset: Tx data offset (from start of buffer) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPNI_CNT_ING_FRAME: Counts ingress frames -+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes -+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit -+ * 'drop' setting -+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors -+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPNI_CNT_EGR_FRAME: Counts egress frames -+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes -+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors -+ */ -+enum dpni_counter { -+ DPNI_CNT_ING_FRAME = 0x0, -+ DPNI_CNT_ING_BYTE = 0x1, -+ DPNI_CNT_ING_FRAME_DROP = 0x2, -+ DPNI_CNT_ING_FRAME_DISCARD = 0x3, -+ DPNI_CNT_ING_MCAST_FRAME = 0x4, -+ DPNI_CNT_ING_MCAST_BYTE = 0x5, -+ DPNI_CNT_ING_BCAST_FRAME = 0x6, -+ DPNI_CNT_ING_BCAST_BYTES = 0x7, -+ DPNI_CNT_EGR_FRAME = 0x8, -+ DPNI_CNT_EGR_BYTE = 0x9, -+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * dpni_get_counter() - Read a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: Returned counter's current value -+ * -+ * Return: '0' on Success; Error code otherwise. 
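The counter API above is polled. A sketch that reads two ingress counters and then clears one of them with dpni_set_counter(), which is declared just below; portal/token/include assumptions are as in the earlier sketches:

static int dpni_counters_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint64_t frames = 0, dropped = 0;
        int err;

        err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &frames);
        if (err)
                return err;

        err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME_DROP, &dropped);
        if (err)
                return err;

        printf("ingress frames %llu, dropped %llu\n",
               (unsigned long long)frames, (unsigned long long)dropped);

        /* Counters are cleared by writing zero */
        return dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME_DROP, 0);
}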
-+ */ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value); -+ -+/** -+ * dpni_set_counter() - Set (or clear) a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: New counter value; typically pass '0' for resetting -+ * the counter. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct - Structure representing DPNI link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ */ -+struct dpni_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpni_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg); -+ -+/** -+ * struct dpni_link_state - Structure representing DPNI link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ * @up: Link state; '0' for down, '1' for up -+ */ -+struct dpni_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpni_get_link_state() - Return the link state (either up or down) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @state: Returned link state; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state); -+ -+/** -+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration -+ * @rate_limit: rate in Mbps -+ * @max_burst_size: burst size in bytes (up to 64KB) -+ */ -+struct dpni_tx_shaping_cfg { -+ uint32_t rate_limit; -+ uint16_t max_burst_size; -+}; -+ -+/** -+ * dpni_set_tx_shaping() - Set the transmit shaping -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tx_shaper: tx shaping configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper); -+ -+/** -+ * dpni_set_max_frame_length() - Set the maximum received frame length. 
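The link configuration/state pair above can be exercised as follows; the 1000 Mbps rate is a placeholder and the option mask is only one plausible combination:

static int dpni_link_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_link_cfg cfg = {
                .rate = 1000,        /* placeholder rate */
                .options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
        };
        struct dpni_link_state state;
        int err;

        err = dpni_set_link_cfg(mc_io, 0, token, &cfg);
        if (err)
                return err;

        err = dpni_get_link_state(mc_io, 0, token, &state);
        if (err)
                return err;

        printf("link %s, rate %u\n",
               state.up ? "up" : "down", (unsigned int)state.rate);
        return 0;
}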
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * dpni_get_max_frame_length() - Get the maximum received frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length); -+ -+/** -+ * dpni_set_mtu() - Set the MTU for the interface. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: MTU length (in bytes) -+ * -+ * MTU determines the maximum fragment size for performing IP -+ * fragmentation on egress packets. -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu); -+ -+/** -+ * dpni_get_mtu() - Get the MTU. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: Returned MTU length (in bytes) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu); -+ -+/** -+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
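Maximum frame length and MTU are set separately above: one is the reception limit, the other bounds IP fragments produced on egress. A sketch with the common Ethernet values 1522/1500 (placeholders), plus multicast promiscuous mode:

static int dpni_frame_limits_demo(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        /* Largest frame accepted on receive (Ethernet + one VLAN tag) */
        err = dpni_set_max_frame_length(mc_io, 0, token, 1522);
        if (err)
                return err;

        /* Largest IP fragment produced on egress */
        err = dpni_set_mtu(mc_io, 0, token, 1500);
        if (err)
                return err;

        /* Accept all multicast traffic */
        return dpni_set_multicast_promisc(mc_io, 0, token, 1);
}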
-+ */ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_primary_mac_addr() - Set the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to set as primary address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_get_primary_mac_addr() - Get the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: Returned MAC address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]); -+ -+/** -+ * dpni_add_mac_addr() - Add MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_remove_mac_addr() - Remove MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @unicast: Set to '1' to clear unicast addresses -+ * @multicast: Set to '1' to clear multicast addresses -+ * -+ * The primary MAC address is not cleared by this operation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast); -+ -+/** -+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. 
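The MAC filtering and promiscuous-mode calls above compose naturally into a bring-up helper; the sketch below is illustrative only, with placeholder MAC addresses and the usual assumptions (open DPNI, cmd_flags 0).

#include <stdint.h>
#include "fsl_dpni.h"

static int setup_mac_filters(struct fsl_mc_io *mc_io, uint16_t token)
{
    const uint8_t primary[6]   = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
    const uint8_t secondary[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x02 };
    int err;

    /* Start from an empty unicast/multicast filter table; the primary
     * MAC address is not touched by this call. */
    err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
    if (err)
        return err;

    err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
    if (err)
        return err;

    /* Accept one additional unicast address. */
    err = dpni_add_mac_addr(mc_io, 0, token, secondary);
    if (err)
        return err;

    /* Multicast promiscuous on, unicast promiscuous off. */
    err = dpni_set_multicast_promisc(mc_io, 0, token, 1);
    if (err)
        return err;
    return dpni_set_unicast_promisc(mc_io, 0, token, 0);
}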
-+ */ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_add_vlan_id() - Add VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_remove_vlan_id() - Remove VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_clear_vlan_filters() - Clear all VLAN filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode -+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority -+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling -+ */ -+enum dpni_tx_schedule_mode { -+ DPNI_TX_SCHED_STRICT_PRIORITY, -+ DPNI_TX_SCHED_WEIGHTED, -+}; -+ -+/** -+ * struct dpni_tx_schedule_cfg - Structure representing Tx -+ * scheduling configuration -+ * @mode: scheduling mode -+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; -+ * not applicable for 'strict-priority' mode; -+ */ -+struct dpni_tx_schedule_cfg { -+ enum dpni_tx_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpni_tx_selection_cfg - Structure representing transmission -+ * selection configuration -+ * @tc_sched: an array of traffic-classes -+ */ -+struct dpni_tx_selection_cfg { -+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; -+}; -+ -+/** -+ * dpni_set_tx_selection() - Set transmission selection configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: transmission selection configuration -+ * -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. 
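A sketch of the VLAN filter calls above, assuming an open DPNI and a caller-supplied list of VLAN IDs; the helper name is a placeholder.

#include <stdint.h>
#include "fsl_dpni.h"

static int allow_vlans(struct fsl_mc_io *mc_io, uint16_t token,
                       const uint16_t *vlan_ids, unsigned int count)
{
    unsigned int i;
    int err;

    /* Enable VLAN filtering, drop any previously installed IDs, then
     * admit only the VLANs passed in by the caller. */
    err = dpni_set_vlan_filters(mc_io, 0, token, 1);
    if (err)
        return err;
    err = dpni_clear_vlan_filters(mc_io, 0, token);
    if (err)
        return err;

    for (i = 0; i < count; i++) {
        err = dpni_add_vlan_id(mc_io, 0, token, vlan_ids[i]);
        if (err)
            return err;
    }
    return 0;
}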
-+ */ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpni_dist_mode - DPNI distribution mode -+ * @DPNI_DIST_MODE_NONE: No distribution -+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if -+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation -+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if -+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation -+ */ -+enum dpni_dist_mode { -+ DPNI_DIST_MODE_NONE = 0, -+ DPNI_DIST_MODE_HASH = 1, -+ DPNI_DIST_MODE_FS = 2 -+}; -+ -+/** -+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action -+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame -+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id -+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash -+ */ -+enum dpni_fs_miss_action { -+ DPNI_FS_MISS_DROP = 0, -+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, -+ DPNI_FS_MISS_HASH = 2 -+}; -+ -+/** -+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration -+ * @miss_action: Miss action selection -+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' -+ */ -+struct dpni_fs_tbl_cfg { -+ enum dpni_fs_miss_action miss_action; -+ uint16_t default_flow_id; -+}; -+ -+/** -+ * dpni_prepare_key_cfg() - function prepare extract parameters -+ * @cfg: defining a full Key Generation profile (rule) -+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before the following functions: -+ * - dpni_set_rx_tc_dist() -+ * - dpni_set_qos_table() -+ */ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf); -+ -+/** -+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration -+ * @dist_size: Set the distribution size; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024 -+ * @dist_mode: Distribution mode -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * the extractions to be used for the distribution key by calling -+ * dpni_prepare_key_cfg() relevant only when -+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' -+ * @fs_cfg: Flow Steering table configuration; only relevant if -+ * 'dist_mode = DPNI_DIST_MODE_FS' -+ */ -+struct dpni_rx_tc_dist_cfg { -+ uint16_t dist_size; -+ enum dpni_dist_mode dist_mode; -+ uint64_t key_cfg_iova; -+ struct dpni_fs_tbl_cfg fs_cfg; -+}; -+ -+/** -+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class distribution configuration -+ * -+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() -+ * first to prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
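An illustrative sketch of Rx hash distribution for one traffic class. It assumes the caller has already built a dpkg_profile_cfg (declared in fsl_dpkg.h, not shown here), run dpni_prepare_key_cfg() into a 256-byte DMA-able buffer, and obtained that buffer's I/O virtual address; the distribution size of 8 is one value from the supported list.

#include <stdint.h>
#include "fsl_dpni.h"

static int enable_rx_hash(struct fsl_mc_io *mc_io, uint16_t token,
                          uint8_t tc_id, uint64_t key_cfg_iova)
{
    struct dpni_rx_tc_dist_cfg dist = { 0 };

    /* key_cfg_iova must reference 256 bytes of DMA-able memory already
     * filled by dpni_prepare_key_cfg() with the header fields to hash
     * on (preparation not shown here). */
    dist.dist_size = 8;                    /* one of the supported sizes   */
    dist.dist_mode = DPNI_DIST_MODE_HASH;  /* needs DPNI_OPT_DIST_HASH set */
    dist.key_cfg_iova = key_cfg_iova;
    /* dist.fs_cfg is ignored in hash mode and left zeroed. */

    return dpni_set_rx_tc_dist(mc_io, 0, token, tc_id, &dist);
}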
-+ */ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg); -+ -+/** -+ * Set to select color aware mode (otherwise - color blind) -+ */ -+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 -+/** -+ * Set to discard frame with RED color -+ */ -+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 -+ -+/** -+ * enum dpni_policer_mode - selecting the policer mode -+ * @DPNI_POLICER_MODE_NONE: Policer is disabled -+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through -+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 -+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 -+ */ -+enum dpni_policer_mode { -+ DPNI_POLICER_MODE_NONE = 0, -+ DPNI_POLICER_MODE_PASS_THROUGH, -+ DPNI_POLICER_MODE_RFC_2698, -+ DPNI_POLICER_MODE_RFC_4115 -+}; -+ -+/** -+ * enum dpni_policer_unit - DPNI policer units -+ * @DPNI_POLICER_UNIT_BYTES: bytes units -+ * @DPNI_POLICER_UNIT_FRAMES: frames units -+ */ -+enum dpni_policer_unit { -+ DPNI_POLICER_UNIT_BYTES = 0, -+ DPNI_POLICER_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_policer_color - selecting the policer color -+ * @DPNI_POLICER_COLOR_GREEN: Green color -+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color -+ * @DPNI_POLICER_COLOR_RED: Red color -+ */ -+enum dpni_policer_color { -+ DPNI_POLICER_COLOR_GREEN = 0, -+ DPNI_POLICER_COLOR_YELLOW, -+ DPNI_POLICER_COLOR_RED -+}; -+ -+/** -+ * struct dpni_rx_tc_policing_cfg - Policer configuration -+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values -+ * @mode: policer mode -+ * @default_color: For pass-through mode the policer re-colors with this -+ * color any incoming packets. For Color aware non-pass-through mode: -+ * policer re-colors with this color all packets with FD[DROPP]>2. -+ * @units: Bytes or Packets -+ * @cir: Committed information rate (CIR) in Kbps or packets/second -+ * @cbs: Committed burst size (CBS) in bytes or packets -+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second -+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second -+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets -+ * Excess burst size (EBS, rfc4115) in bytes or packets -+ */ -+struct dpni_rx_tc_policing_cfg { -+ uint32_t options; -+ enum dpni_policer_mode mode; -+ enum dpni_policer_unit units; -+ enum dpni_policer_color default_color; -+ uint32_t cir; -+ uint32_t cbs; -+ uint32_t eir; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
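The policer structure above maps directly onto an RFC 2698 two-rate configuration; the sketch below is illustrative, with placeholder rates and burst sizes and the usual open-DPNI assumptions.

#include <stdint.h>
#include "fsl_dpni.h"

static int police_rx_tc(struct fsl_mc_io *mc_io, uint16_t token, uint8_t tc_id)
{
    struct dpni_rx_tc_policing_cfg pol = { 0 };

    /* Two-rate policer per RFC 2698, byte based, colour blind,
     * discarding red frames. */
    pol.options = DPNI_POLICER_OPT_DISCARD_RED;
    pol.mode = DPNI_POLICER_MODE_RFC_2698;
    pol.units = DPNI_POLICER_UNIT_BYTES;
    pol.default_color = DPNI_POLICER_COLOR_GREEN;
    pol.cir = 100 * 1000;    /* committed rate: 100 Mbps, expressed in Kbps */
    pol.cbs = 64 * 1024;     /* committed burst size in bytes */
    pol.eir = 200 * 1000;    /* peak rate (PIR for RFC 2698), in Kbps */
    pol.ebs = 128 * 1024;    /* peak burst size (PBS) in bytes */

    return dpni_set_rx_tc_policing(mc_io, 0, token, tc_id, &pol);
}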
-+ */ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * enum dpni_congestion_unit - DPNI congestion units -+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units -+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units -+ */ -+enum dpni_congestion_unit { -+ DPNI_CONGESTION_UNIT_BYTES = 0, -+ DPNI_CONGESTION_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_early_drop_mode - DPNI early drop mode -+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpni_early_drop_mode { -+ DPNI_EARLY_DROP_MODE_NONE = 0, -+ DPNI_EARLY_DROP_MODE_TAIL, -+ DPNI_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpni_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the max_threshold). -+ */ -+struct dpni_wred_cfg { -+ uint64_t max_threshold; -+ uint64_t min_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpni_early_drop_cfg - early-drop configuration -+ * @mode: drop mode -+ * @units: units type -+ * @green: WRED - 'green' configuration -+ * @yellow: WRED - 'yellow' configuration -+ * @red: WRED - 'red' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpni_early_drop_cfg { -+ enum dpni_early_drop_mode mode; -+ enum dpni_congestion_unit units; -+ -+ struct dpni_wred_cfg green; -+ struct dpni_wred_cfg yellow; -+ struct dpni_wred_cfg red; -+ -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpni_prepare_early_drop() - prepare an early drop. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_set_rx_tc_early_drop or -+ * dpni_set_tx_tc_early_drop -+ * -+ */ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpni_extract_early_drop() - extract the early drop configuration. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called after dpni_get_rx_tc_early_drop or -+ * dpni_get_tx_tc_early_drop -+ * -+ */ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf); -+ -+/** -+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
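A hedged sketch of the prepare-then-set flow for WRED early drop. The caller is assumed to provide both the zeroed 256-byte DMA-able buffer and its I/O virtual address; how that mapping is obtained is platform specific and not shown, and the thresholds are placeholders.

#include <stdint.h>
#include "fsl_dpni.h"

static int arm_rx_wred(struct fsl_mc_io *mc_io, uint16_t token, uint8_t tc_id,
                       uint8_t *early_drop_buf, uint64_t early_drop_iova)
{
    struct dpni_early_drop_cfg ed = { 0 };

    /* early_drop_buf: 256 zeroed, DMA-able bytes; early_drop_iova: its
     * I/O virtual address (allocation and mapping left to the caller). */
    ed.mode = DPNI_EARLY_DROP_MODE_WRED;
    ed.units = DPNI_CONGESTION_UNIT_BYTES;
    ed.green.min_threshold = 512 * 1024;
    ed.green.max_threshold = 1024 * 1024;
    ed.green.drop_probability = 10;   /* percent, applied at max_threshold */
    ed.yellow = ed.green;
    ed.red = ed.green;

    /* Serialize the configuration into the buffer, then hand its IOVA
     * to the management complex. */
    dpni_prepare_early_drop(&ed, early_drop_buf);
    return dpni_set_rx_tc_early_drop(mc_io, 0, token, tc_id, early_drop_iova);
}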
-+ */ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * enum dpni_dest - DPNI destination types -+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and -+ * does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpni_dest { -+ DPNI_DEST_NONE = 0, -+ DPNI_DEST_DPIO = 1, -+ DPNI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPNI_DEST_NONE' option -+ */ -+struct dpni_dest_cfg { -+ enum dpni_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPNI congestion options */ -+ -+/** -+ * CSCN message is written to message_iova once entering a -+ * congestion state (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 -+/** -+ * CSCN message is written to message_iova once exiting a -+ * congestion state (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 -+/** -+ * CSCN write will attempt to allocate into a cache (coherent write); -+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected -+ */ -+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once entering a congestion state -+ * (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once exiting a congestion state -+ * (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the -+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) -+ */ -+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 -+ -+/** -+ * struct dpni_congestion_notification_cfg - congestion notification -+ * configuration -+ * @units: units type -+ * @threshold_entry: above this threshold we enter a congestion state. -+ * set it to '0' to disable it -+ * @threshold_exit: below this threshold we exit the congestion state. 
-+ * @message_ctx: The context that will be part of the CSCN message -+ * @message_iova: I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is -+ * contained in 'options' -+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel -+ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values -+ */ -+ -+struct dpni_congestion_notification_cfg { -+ enum dpni_congestion_unit units; -+ uint32_t threshold_entry; -+ uint32_t threshold_exit; -+ uint64_t message_ctx; -+ uint64_t message_iova; -+ struct dpni_dest_cfg dest_cfg; -+ uint16_t options; -+}; -+ -+/** -+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
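An illustrative congestion-notification setup for one Rx traffic class: write a CSCN message to memory on both entry and exit, with no WQ-channel notification. The CSCN buffer's I/O virtual address is assumed to come from the caller, and the thresholds are placeholders.

#include <stdint.h>
#include "fsl_dpni.h"

static int watch_rx_congestion(struct fsl_mc_io *mc_io, uint16_t token,
                               uint8_t tc_id, uint64_t cscn_iova)
{
    struct dpni_congestion_notification_cfg cn = { 0 };

    /* cscn_iova: I/O virtual address of 16-byte-aligned, DMA-able memory
     * that receives the CSCN message; obtaining it is platform specific. */
    cn.units = DPNI_CONGESTION_UNIT_FRAMES;
    cn.threshold_entry = 1024;    /* enter congestion above 1024 frames */
    cn.threshold_exit = 256;      /* leave congestion below 256 frames  */
    cn.message_ctx = 0;
    cn.message_iova = cscn_iova;
    cn.dest_cfg.dest_type = DPNI_DEST_NONE;   /* no WQ-channel notification */
    cn.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;

    return dpni_set_rx_tc_congestion_notification(mc_io, 0, token, tc_id, &cn);
}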
-+ */ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * enum dpni_flc_type - DPNI FLC types -+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value -+ * @DPNI_FLC_STASH: select the FLC to be used for stash control -+ */ -+enum dpni_flc_type { -+ DPNI_FLC_USER_DEFINED = 0, -+ DPNI_FLC_STASH = 1, -+}; -+ -+/** -+ * enum dpni_stash_size - DPNI FLC stashing size -+ * @DPNI_STASH_SIZE_0B: no stash -+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes -+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes -+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes -+ */ -+enum dpni_stash_size { -+ DPNI_STASH_SIZE_0B = 0, -+ DPNI_STASH_SIZE_64B = 1, -+ DPNI_STASH_SIZE_128B = 2, -+ DPNI_STASH_SIZE_192B = 3, -+}; -+ -+/* DPNI FLC stash options */ -+ -+/** -+ * stashes the whole annotation area (up to 192 bytes) -+ */ -+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 -+ -+/** -+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration -+ * @flc_type: FLC type -+ * @options: Mask of available options; -+ * use 'DPNI_FLC_STASH_' values -+ * @frame_data_size: Size of frame data to be stashed -+ * @flow_context_size: Size of flow context to be stashed -+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': -+ * this value will be provided in the frame descriptor -+ * (FD[FLC]) -+ * 2. In case flc_type is 'DPNI_FLC_STASH': -+ * this value will be I/O virtual address of the -+ * flow-context; -+ * Must be cacheline-aligned and DMA-able memory -+ */ -+struct dpni_flc_cfg { -+ enum dpni_flc_type flc_type; -+ uint32_t options; -+ enum dpni_stash_size frame_data_size; -+ enum dpni_stash_size flow_context_size; -+ uint64_t flow_context; -+}; -+ -+/** -+ * DPNI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPNI_QUEUE_OPT_DEST 0x00000002 -+/** Select to modify the flow-context parameters; -+ * not applicable for Tx-conf/Err queues as the FD comes from the user -+ */ -+#define DPNI_QUEUE_OPT_FLC 0x00000004 -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 -+/* Select to modify the queue's tail-drop threshold */ -+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 -+ -+/** -+ * struct dpni_queue_cfg - Structure representing queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPNI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' -+ * is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' -+ * @flc_cfg: Flow context configuration; in case the TC's distribution -+ * is either NONE or HASH the FLC's settings of flow#0 are used. -+ * in the case of FS (flow-steering) the flow's FLC settings -+ * are used. 
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' -+ * @order_preservation_en: enable/disable order preservation; -+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained -+ * in 'options' -+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; -+ * '0' value disable the threshold; maximum value is 0xE000000; -+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained -+ * in 'options' -+ */ -+struct dpni_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * struct dpni_queue_attr - Structure representing queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @flc_cfg: Flow context configuration -+ * @order_preservation_en: enable/disable order preservation -+ * @tail_drop_threshold: queue's tail drop threshold in bytes; -+ * @fqid: Virtual fqid value to be used for dequeue operations -+ */ -+struct dpni_queue_attr { -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+ -+ uint32_t fqid; -+}; -+ -+/** -+ * DPNI Tx flow modification options -+ */ -+ -+/** -+ * Select to modify the settings for dedicate Tx confirmation/error -+ */ -+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 -+/** -+ * Select to modify the L3 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 -+/** -+ * Select to modify the L4 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 -+ -+/** -+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration -+ * @options: Flags representing the suggested modifications to the Tx flow; -+ * Use any combination 'DPNI_TX_FLOW_OPT_' flags -+ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx -+ * confirmation and error queue; Set to '0' to use the private -+ * Tx confirmation and error queue; valid only if -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation -+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' -+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' -+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' -+ */ -+struct dpni_tx_flow_cfg { -+ uint32_t options; -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_set_tx_flow() - Set Tx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: Provides (or returns) the sender's flow ID; -+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate -+ * a new flow_id; this ID should be used as the QDBIN argument -+ * in enqueue operations -+ * @cfg: Tx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg); -+ -+/** -+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes -+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and -+ * error queue; '0' if using private Tx confirmation and error queue -+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled -+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled -+ */ -+struct dpni_tx_flow_attr { -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_get_tx_flow() - Get Tx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function -+ * @attr: Returned Tx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr); -+ -+/** -+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration -+ * @errors_only: Set to '1' to report back only error frames; -+ * Set to '0' to confirm transmission/error for all transmitted frames; -+ * @queue_cfg: Queue configuration -+ */ -+struct dpni_tx_conf_cfg { -+ int errors_only; -+ struct dpni_queue_cfg queue_cfg; -+}; -+ -+/** -+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: Queue configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported -+ * back - successfully transmitted frames are not confirmed. Otherwise, all -+ * transmitted frames are sent for confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
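A sketch of creating a Tx flow with checksum generation enabled. DPNI_NEW_FLOW_ID is the value the dpni_set_tx_flow() description above says to pass for a new sender (its definition lives elsewhere in fsl_dpni.h); everything else follows the usual open-DPNI assumptions.

#include <stdint.h>
#include "fsl_dpni.h"

static int open_tx_flow(struct fsl_mc_io *mc_io, uint16_t token,
                        uint16_t *flow_id)
{
    struct dpni_tx_flow_cfg cfg = { 0 };

    /* Enable L3 and L4 checksum generation on a newly allocated sender
     * flow. */
    cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
                  DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
    cfg.l3_chksum_gen = 1;
    cfg.l4_chksum_gen = 1;

    *flow_id = DPNI_NEW_FLOW_ID;
    /* On success, *flow_id holds the ID to use as the QDBIN argument in
     * enqueue operations. */
    return dpni_set_tx_flow(mc_io, 0, token, flow_id, &cfg);
}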
-+ */ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg); -+ -+/** -+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes -+ * @errors_only: '1' if only error frames are reported back; '0' if all -+ * transmitted frames are confirmed -+ * @queue_attr: Queue attributes -+ */ -+struct dpni_tx_conf_attr { -+ int errors_only; -+ struct dpni_queue_attr queue_attr; -+}; -+ -+/** -+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @attr: Returned tx-conf attributes -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr); -+ -+/** -+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @revoke: revoke or not -+ * -+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not -+ * selected at DPNI creation. -+ * Calling this function with 'revoke' set to '1' disables all transmit -+ * confirmation (including the private confirmation queues), regardless of -+ * previous settings; Note that in this case, Tx error frames are still -+ * enqueued to the general transmit errors queue. -+ * Calling this function with 'revoke' set to '0' restores the previous -+ * settings for both general and private transmit confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke); -+ -+/** -+ * dpni_set_rx_flow() - Set Rx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7); -+ * use 'DPNI_ALL_TCS' to set all TCs and all flows -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPNI_ALL_TC_FLOWS' to set all flows within -+ * this tc_id; ignored if tc_id is set to -+ * 'DPNI_ALL_TCS'; -+ * @cfg: Rx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_flow() - Get Rx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @flow_id: Rx flow id within the traffic class -+ * @attr: Returned Rx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * dpni_set_rx_err_queue() - Set Rx error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_err_queue() - Get Rx error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Returned Queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
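An illustrative Rx-flow binding: point every flow of a traffic class at a DPIO so FQDAN notifications are generated. DPNI_ALL_TC_FLOWS is the value the dpni_set_rx_flow() description above names for "all flows"; the DPIO id and user context are caller-supplied placeholders.

#include <stdint.h>
#include "fsl_dpni.h"

static int steer_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
                            uint8_t tc_id, int dpio_id, uint64_t user_ctx)
{
    struct dpni_queue_cfg q = { 0 };

    /* Modify only the user context and the destination; FLC, order
     * preservation and tail-drop keep their current settings. */
    q.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
    q.user_ctx = user_ctx;                   /* returned in each dequeued FD */
    q.dest_cfg.dest_type = DPNI_DEST_DPIO;   /* FQDAN notifications to DPIO  */
    q.dest_cfg.dest_id = dpio_id;
    q.dest_cfg.priority = 0;

    /* Apply to every Rx flow of this traffic class. */
    return dpni_set_rx_flow(mc_io, 0, token, tc_id, DPNI_ALL_TC_FLOWS, &q);
}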
-+ */ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * key extractions to be used as the QoS criteria by calling -+ * dpni_prepare_key_cfg() -+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); -+ * '0' to use the 'default_tc' in such cases -+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 -+ */ -+struct dpni_qos_tbl_cfg { -+ uint64_t key_cfg_iova; -+ int discard_on_miss; -+ uint8_t default_tc; -+}; -+ -+/** -+ * dpni_set_qos_table() - Set QoS mapping table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS table configuration -+ * -+ * This function and all QoS-related functions require that -+ *'max_tcs > 1' was set at DPNI creation. -+ * -+ * warning: Before calling this function, call dpni_prepare_key_cfg() to -+ * prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg); -+ -+/** -+ * struct dpni_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpni_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to add -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id); -+ -+/** -+ * dpni_remove_qos_entry() - Remove QoS mapping entry -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_qos_table() - Clear all QoS mapping entries -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Following this function call, all frames are directed to -+ * the default traffic class (0) -+ * -+ * Return: '0' on Success; Error code otherwise. 
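A QoS-table sketch under the stated constraints (max_tcs > 1 at creation, key configuration already serialized with dpni_prepare_key_cfg()); the rule, IOVAs and target traffic class are caller-supplied placeholders.

#include <stdint.h>
#include "fsl_dpni.h"

static int setup_qos(struct fsl_mc_io *mc_io, uint16_t token,
                     uint64_t key_cfg_iova, const struct dpni_rule_cfg *rule)
{
    struct dpni_qos_tbl_cfg qos = { 0 };
    int err;

    /* key_cfg_iova: 256 bytes of DMA-able memory already prepared with
     * dpni_prepare_key_cfg(); the rule's key/mask IOVAs must likewise be
     * DMA-able. */
    qos.key_cfg_iova = key_cfg_iova;
    qos.discard_on_miss = 0;    /* unmatched frames fall back to default_tc */
    qos.default_tc = 0;

    err = dpni_set_qos_table(mc_io, 0, token, &qos);
    if (err)
        return err;

    /* Frames matching 'rule' are classified into traffic class 1. */
    return dpni_add_qos_entry(mc_io, 0, token, rule, 1);
}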
-+ */ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class -+ * (to select a flow ID) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to add -+ * @flow_id: Flow id selection (must be smaller than the -+ * distribution size of the traffic class) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id); -+ -+/** -+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id); -+ -+/** -+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
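Finally, a small flow-steering sketch using the FS calls above; the rule and flow id come from the caller, and the traffic class is assumed to have been configured with DPNI_DIST_MODE_FS.

#include <stdint.h>
#include "fsl_dpni.h"

static int pin_rx_flow(struct fsl_mc_io *mc_io, uint16_t token, uint8_t tc_id,
                       const struct dpni_rule_cfg *rule, uint16_t flow_id)
{
    int err;

    /* flow_id must be smaller than the distribution size configured for
     * this traffic class, and the class must use DPNI_DIST_MODE_FS. */
    err = dpni_clear_fs_entries(mc_io, 0, token, tc_id);
    if (err)
        return err;
    return dpni_add_fs_entry(mc_io, 0, token, tc_id, rule, flow_id);
}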
-+ */ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI -+ * creation. Fragmentation is performed according to MTU value -+ * set by dpni_set_mtu() function -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+#endif /* __FSL_DPNI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h -new file mode 100644 -index 0000000..c0f8af0 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h -@@ -0,0 +1,1058 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPNI_CMD_H -+#define _FSL_DPNI_CMD_H -+ -+/* DPNI Version */ -+#define DPNI_VER_MAJOR 6 -+#define DPNI_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPNI_CMDID_OPEN 0x801 -+#define DPNI_CMDID_CLOSE 0x800 -+#define DPNI_CMDID_CREATE 0x901 -+#define DPNI_CMDID_DESTROY 0x900 -+ -+#define DPNI_CMDID_ENABLE 0x002 -+#define DPNI_CMDID_DISABLE 0x003 -+#define DPNI_CMDID_GET_ATTR 0x004 -+#define DPNI_CMDID_RESET 0x005 -+#define DPNI_CMDID_IS_ENABLED 0x006 -+ -+#define DPNI_CMDID_SET_IRQ 0x010 -+#define DPNI_CMDID_GET_IRQ 0x011 -+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPNI_CMDID_SET_IRQ_MASK 0x014 -+#define DPNI_CMDID_GET_IRQ_MASK 0x015 -+#define DPNI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPNI_CMDID_SET_POOLS 0x200 -+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 -+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 -+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 -+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 -+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 -+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 -+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 -+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 -+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 -+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A -+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B -+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C -+ -+#define DPNI_CMDID_GET_QDID 0x210 -+#define DPNI_CMDID_GET_SP_INFO 0x211 -+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 -+#define DPNI_CMDID_GET_COUNTER 0x213 -+#define DPNI_CMDID_SET_COUNTER 0x214 -+#define DPNI_CMDID_GET_LINK_STATE 0x215 -+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 -+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 -+#define DPNI_CMDID_SET_MTU 0x218 -+#define DPNI_CMDID_GET_MTU 0x219 -+#define DPNI_CMDID_SET_LINK_CFG 0x21A -+#define DPNI_CMDID_SET_TX_SHAPING 0x21B -+ -+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 -+#define DPNI_CMDID_GET_MCAST_PROMISC 0x221 -+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 -+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 -+#define DPNI_CMDID_SET_PRIM_MAC 0x224 -+#define DPNI_CMDID_GET_PRIM_MAC 0x225 -+#define DPNI_CMDID_ADD_MAC_ADDR 0x226 -+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 -+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 -+ -+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 -+#define DPNI_CMDID_ADD_VLAN_ID 0x231 -+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 -+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 -+ -+#define DPNI_CMDID_SET_RX_TC_DIST 0x235 -+#define DPNI_CMDID_SET_TX_FLOW 0x236 -+#define DPNI_CMDID_GET_TX_FLOW 0x237 -+#define DPNI_CMDID_SET_RX_FLOW 0x238 -+#define DPNI_CMDID_GET_RX_FLOW 0x239 -+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A -+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B -+ -+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E -+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F -+ -+#define DPNI_CMDID_SET_QOS_TBL 0x240 -+#define DPNI_CMDID_ADD_QOS_ENT 0x241 -+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 -+#define DPNI_CMDID_CLR_QOS_TBL 0x243 -+#define DPNI_CMDID_ADD_FS_ENT 0x244 -+#define DPNI_CMDID_REMOVE_FS_ENT 0x245 -+#define DPNI_CMDID_CLR_FS_ENT 0x246 -+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 -+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 -+#define DPNI_CMDID_SET_IPR 0x249 -+#define DPNI_CMDID_SET_IPF 0x24A -+ -+#define DPNI_CMDID_SET_TX_SELECTION 0x250 -+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 -+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 -+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 -+#define 
DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 -+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 -+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 -+#define DPNI_CMDID_SET_TX_CONF 0x257 -+#define DPNI_CMDID_GET_TX_CONF 0x258 -+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 -+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A -+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B -+#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_OPEN(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ 
-+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ -+ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ -+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ -+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ -+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ -+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ -+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ -+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ -+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ 
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_ATTR(cmd, attr) \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ -+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ -+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ -+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ -+ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \ -+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ 
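Illustrative aside, not part of the patch: every DPNI_CMD_*/DPNI_RSP_* macro above follows the same pattern — a struct field is packed into, or extracted from, a fixed (param, offset, width) slot of a 64-bit command word, as documented by the recurring "/* cmd, param, offset, width, type, arg_name */" comments. A minimal, self-contained C sketch of that bit-field packing idea, using hypothetical mc_command/mc_pack/mc_unpack names rather than the real MC_CMD_OP/MC_RSP_OP helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical command buffer: a header word plus 64-bit parameter words. */
struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

/* Pack `val` into bits [offset, offset+width) of parameter word `param`. */
static void mc_pack(struct mc_command *cmd, int param, int offset, int width,
		    uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	cmd->params[param] &= ~(mask << offset);
	cmd->params[param] |= (val & mask) << offset;
}

/* Extract bits [offset, offset+width) of parameter word `param`. */
static uint64_t mc_unpack(const struct mc_command *cmd, int param, int offset,
			  int width)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (cmd->params[param] >> offset) & mask;
}

int main(void)
{
	struct mc_command cmd = { 0 };

	/* e.g. a 16-bit MTU at param 0, offset 0, and an 8-bit tc_id at offset 16 */
	mc_pack(&cmd, 0, 0, 16, 1500);
	mc_pack(&cmd, 0, 16, 8, 3);
	printf("mtu=%" PRIu64 " tc=%" PRIu64 "\n",
	       mc_unpack(&cmd, 0, 0, 16), mc_unpack(&cmd, 0, 16, 8));
	return 0;
}

The macros in the patch expand to similar shift/mask logic generated at compile time from those (param, offset, width) columns.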
-+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ -+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, 
uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_QDID(cmd, qdid) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_COUNTER(cmd, counter) \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_COUNTER(cmd, value) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MTU(cmd, mtu) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MTU(cmd, mtu) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, 
param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ -+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[0].mode); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[1].mode); \ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[2].mode); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 
48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[3].mode); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[4].mode); \ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[5].mode); \ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[6].mode); \ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[7].mode); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \ -+ cfg->fs_cfg.miss_action); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+} while (0) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ -+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, 
cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPR(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPF(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, 
uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, 
cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ cfg->queue_cfg.dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ -+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ -+ cfg->queue_cfg.tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ cfg->queue_cfg.flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.frame_data_size); \ -+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.flow_context_size); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ -+ cfg->queue_cfg.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ -+ attr->queue_attr.dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ attr->queue_attr.dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ -+ MC_RSP_OP(cmd, 0, 46, 1, int, \ -+ attr->queue_attr.order_preservation_en); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ -+ 
MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ -+ attr->queue_attr.tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ -+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ attr->queue_attr.flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.frame_data_size); \ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.flow_context_size); \ -+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ -+ attr->queue_attr.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#endif /* _FSL_DPNI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprc.h b/drivers/net/dpaa2/mc/fsl_dprc.h -new file mode 100644 -index 0000000..c831f46 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprc.h -@@ -0,0 +1,1032 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPRC_H -+#define _FSL_DPRC_H -+ -+/* Data Path Resource Container API -+ * Contains DPRC API for managing and querying DPAA resources -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Set this value as the icid value in dprc_cfg structure when creating a -+ * container, in case the ICID is not selected by the user and should be -+ * allocated by the DPRC from the pool of ICIDs. -+ */ -+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) -+ -+/** -+ * Set this value as the portal_id value in dprc_cfg structure when creating a -+ * container, in case the portal ID is not specifically selected by the -+ * user and should be allocated by the DPRC from the pool of portal ids. -+ */ -+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) -+ -+/** -+ * dprc_get_container_id() - Get container ID associated with a given portal. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Requested container ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id); -+ -+/** -+ * dprc_open() - Open DPRC object for use -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Container ID to open -+ * @token: Returned token of DPRC object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Required before any operation on the object. -+ */ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token); -+ -+/** -+ * dprc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Container general options -+ * -+ * These options may be selected at container creation by the container creator -+ * and can be retrieved using dprc_get_attributes() -+ */ -+ -+/** -+ * Spawn Policy Option allowed - Indicates that the new container is allowed -+ * to spawn and have its own child containers. 
-+ */ -+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 -+ -+/** -+ * General Container allocation policy - Indicates that the new container is -+ * allowed to allocate requested resources from its parent container; if not -+ * set, the container is only allowed to use resources in its own pools; Note -+ * that this is a container's global policy, but the parent container may -+ * override it and set specific quota per resource type. -+ */ -+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 -+ -+/** -+ * Object initialization allowed - software context associated with this -+ * container is allowed to invoke object initialization operations. -+ */ -+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 -+ -+/** -+ * Topology change allowed - software context associated with this -+ * container is allowed to invoke topology operations, such as attach/detach -+ * of network objects. -+ */ -+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 -+ -+/** -+ * AIOP - Indicates that the container belongs to AIOP. -+ */ -+#define DPRC_CFG_OPT_AIOP 0x00000020 -+ -+/** -+ * IRQ Config - Indicates that the container is allowed to configure its IRQs. -+ */ -+#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 -+ -+/** -+ * struct dprc_cfg - Container configuration options -+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free -+ * ICID value is allocated by the DPRC -+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free -+ * portal ID is allocated by the DPRC -+ * @options: Combination of 'DPRC_CFG_OPT_' options -+ * @label: Object's label -+ */ -+struct dprc_cfg { -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ char label[16]; -+}; -+ -+/** -+ * dprc_create_container() - Create child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @cfg: Child container configuration -+ * @child_container_id: Returned child container ID -+ * @child_portal_offset: Returned child portal offset from MC portal base -+ * -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset); -+ -+/** -+ * dprc_destroy_container() - Destroy child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to destroy -+ * -+ * This function terminates the child container, so following this call the -+ * child container ID becomes invalid. -+ * -+ * Notes: -+ * - All resources and objects of the destroyed container are returned to the -+ * parent container, or destroyed if they were created by the destroyed container. -+ * - This function destroys all the child containers of the specified -+ * container prior to destroying the container itself. -+ * -+ * warning: Only the parent container is allowed to destroy a child container; -+ * Container 0 can't be destroyed -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * dprc_reset_container - Reset child container.
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to reset -+ * -+ * In case a software context crashes or becomes non-responsive, the parent -+ * may wish to reset its resources container before the software context is -+ * restarted. -+ * -+ * This routine informs all objects assigned to the child container that the -+ * container is being reset, so they may perform any cleanup operations that are -+ * needed. All object handles that were owned by the child container shall be -+ * closed. -+ * -+ * Note that such a request may be submitted even if the child software context -+ * has not crashed, but the resulting object cleanup operations will not be -+ * aware of that. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * DPRC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPRC_IRQ_INDEX 0 -+ -+/** -+ * Number of dprc's IRQs -+ */ -+#define DPRC_NUM_OF_IRQS 1 -+ -+/* DPRC IRQ events */ -+/** -+ * IRQ event - Indicates that a new object was added to the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 -+/** -+ * IRQ event - Indicates that an object was removed from the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 -+/** -+ * IRQ event - Indicates that resources were added to the container -+ */ -+#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 -+/** -+ * IRQ event - Indicates that resources were removed from the container -+ */ -+#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 -+/** -+ * IRQ event - Indicates that one of the descendant containers opened by -+ * this container was destroyed -+ */ -+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 -+/** -+ * IRQ event - Indicates that one of the container's opened objects was -+ * destroyed -+ */ -+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 -+/** -+ * IRQ event - Indicates that an object was created in the container -+ */ -+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 -+ -+/** -+ * struct dprc_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprc_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_irq() - Get IRQ information from the DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise.
-+ */ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprc_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprc_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
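Illustrative aside, not part of the patch: a hedged sketch of how the DPRC IRQ calls declared above might be combined to arm a single event. The MSI address/data values and the cmd_flags of 0 are placeholders, not values mandated by the API.

/* Illustrative only: arm the "object added" event for the DPRC's one IRQ. */
static int arm_obj_added_irq(struct fsl_mc_io *mc_io, uint16_t token,
			     uint64_t msi_addr, uint32_t msi_data)
{
	/* Message-based interrupt: the MC writes `val` to `addr` to raise it. */
	struct dprc_irq_cfg cfg = {
		.addr = msi_addr,
		.val = msi_data,
		.irq_num = 0,
	};
	int err;

	err = dprc_set_irq(mc_io, 0, token, DPRC_IRQ_INDEX, &cfg);
	if (err)
		return err;

	/* Only the "object added" cause should assert the IRQ. */
	err = dprc_set_irq_mask(mc_io, 0, token, DPRC_IRQ_INDEX,
				DPRC_IRQ_EVENT_OBJ_ADDED);
	if (err)
		return err;

	return dprc_set_irq_enable(mc_io, 0, token, DPRC_IRQ_INDEX, 1);
}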
-+ */ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprc_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprc_attributes - Container attributes -+ * @container_id: Container's ID -+ * @icid: Container's ICID -+ * @portal_id: Container's portal ID -+ * @options: Container's options as set at container's creation -+ * @version: DPRC version -+ */ -+struct dprc_attributes { -+ int container_id; -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ /** -+ * struct version - DPRC version -+ * @major: DPRC major version -+ * @minor: DPRC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprc_get_attributes() - Obtains container attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @attributes: Returned container attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attributes); -+ -+/** -+ * dprc_set_res_quota() - Set allocation policy for a specific resource/object -+ * type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: Resource/object type -+ * @quota: Sets the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from its parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. -+ * -+ * Allocation policy determines whether or not a container may allocate -+ * resources from its parent. Each container has a 'global' allocation policy -+ * that is set when the container is created. -+ * -+ * This function sets allocation policy for a specific resource type. -+ * The default policy for all resource types matches the container's 'global' -+ * allocation policy. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only the parent container is allowed to change a child policy. 
-+ */ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota); -+ -+/** -+ * dprc_get_res_quota() - Gets the allocation policy of a specific -+ * resource/object type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: resource/object type -+ * @quota: Returns the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from the parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota); -+ -+/* Resource request options */ -+ -+/** -+ * Explicit resource ID request - The requested objects/resources -+ * are explicit and sequential (in case of resources). -+ * The base ID is given in the res_req id_base_align field -+ */ -+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 -+ -+/** -+ * Aligned resources request - Relevant only for resources -+ * request (and not objects). Indicates that the resources' base ID should be -+ * sequential and aligned to the value given in the dprc_res_req id_base_align field -+ */ -+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 -+ -+/** -+ * Plugged Flag - Relevant only for object assignment request. -+ * Indicates that after all objects are assigned, an interrupt will be invoked at -+ * the relevant GPP. The assigned object will be marked as plugged. -+ * Plugged objects can't be assigned from their container -+ */ -+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 -+ -+/** -+ * struct dprc_res_req - Resource request descriptor, to be used in assignment -+ * or un-assignment of resources and objects. -+ * @type: Resource/object type: Represented as a NULL terminated string. -+ * This string may be received by using dprc_get_pool() to get resource -+ * type and dprc_get_obj() to get object type; -+ * Note: it is not possible to assign/un-assign DPRC objects -+ * @num: Number of resources -+ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options -+ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT -+ * is set at option), this field represents the required base ID -+ * for resource allocation; In case of aligned assignment -+ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field -+ * indicates the required alignment for the resource ID(s) - -+ * use 0 if there are no alignment or explicit ID requirements -+ */ -+struct dprc_res_req { -+ char type[16]; -+ uint32_t num; -+ uint32_t options; -+ int id_base_align; -+}; -+ -+/** -+ * dprc_assign() - Assigns objects or resources to a child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to -+ * assign to the given container -+ * -+ * Assignment is usually done by a parent (this DPRC) to one of its child -+ * containers. -+ * -+ * According to the DPRC allocation policy, the assigned resources may be taken -+ * (allocated) from the container's ancestors, if not enough resources are -+ * available in the container itself.
-+ * -+ * The type of assignment depends on the dprc_res_req options, as follows: -+ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have -+ * the explicit base ID specified at the id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be -+ * aligned to the value given at id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, -+ * and indicates that the object must be set to the plugged state. -+ * -+ * A container may use this function with its own ID in order to change a -+ * object state to plugged or unplugged. -+ * -+ * If IRQ information has been set in the child DPRC, it will signal an -+ * interrupt following every change in its object assignment. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_unassign() - Un-assigns objects or resources from a child container -+ * and moves them into this (parent) DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to un-assign from -+ * the child container -+ * -+ * Un-assignment of objects can succeed only if the object is not in the -+ * plugged or opened state. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_get_pool_count() - Get the number of dprc's pools -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_count: Returned number of resource pools in the dprc -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count); -+ -+/** -+ * dprc_get_pool() - Get the type (string) of a certain dprc's pool -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_index: Index of the pool to be queried (< pool_count) -+ * @type: The type of the pool -+ * -+ * The pool types retrieved one by one by incrementing -+ * pool_index up to (not including) the value of pool_count returned -+ * from dprc_get_pool_count(). dprc_get_pool_count() must -+ * be called prior to dprc_get_pool(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type); -+ -+/** -+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_count: Number of objects assigned to the DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. 
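Illustrative aside, not part of the patch: a minimal sketch of an assignment request built with the dprc_res_req descriptor and dprc_assign() declared above. The "dpbp" type string and the cmd_flags of 0 are assumptions used only for illustration.

/* Illustrative only: hand one plugged "dpbp" object to a child container. */
static int give_child_one_dpbp(struct fsl_mc_io *mc_io, uint16_t token,
			       int child_container_id)
{
	struct dprc_res_req req = {
		.type = "dpbp",			/* assumed object type */
		.num = 1,
		.options = DPRC_RES_REQ_OPT_PLUGGED,
		.id_base_align = 0,		/* no explicit ID or alignment request */
	};

	return dprc_assign(mc_io, 0, token, child_container_id, &req);
}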
-+ */ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count); -+ -+/** -+ * Objects Attributes Flags -+ */ -+ -+/** -+ * Opened state - Indicates that an object is open by at least one owner -+ */ -+#define DPRC_OBJ_STATE_OPEN 0x00000001 -+/** -+ * Plugged state - Indicates that the object is plugged -+ */ -+#define DPRC_OBJ_STATE_PLUGGED 0x00000002 -+ -+/** -+ * Shareability flag - Object flag indicating no memory shareability. -+ * the object generates memory accesses that are non coherent with other -+ * masters; -+ * user is responsible for proper memory handling through IOMMU configuration. -+ */ -+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 -+ -+/** -+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() -+ * @type: Type of object: NULL terminated string -+ * @id: ID of logical object resource -+ * @vendor: Object vendor identifier -+ * @ver_major: Major version number -+ * @ver_minor: Minor version number -+ * @irq_count: Number of interrupts supported by the object -+ * @region_count: Number of mappable regions supported by the object -+ * @state: Object state: combination of DPRC_OBJ_STATE_ states -+ * @label: Object label -+ * @flags: Object's flags -+ */ -+struct dprc_obj_desc { -+ char type[16]; -+ int id; -+ uint16_t vendor; -+ uint16_t ver_major; -+ uint16_t ver_minor; -+ uint8_t irq_count; -+ uint8_t region_count; -+ uint32_t state; -+ char label[16]; -+ uint16_t flags; -+}; -+ -+/** -+ * dprc_get_obj() - Get general information on an object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_index: Index of the object to be queried (< obj_count) -+ * @obj_desc: Returns the requested object descriptor -+ * -+ * The object descriptors are retrieved one by one by incrementing -+ * obj_index up to (not including) the value of obj_count returned -+ * from dprc_get_obj_count(). dprc_get_obj_count() must -+ * be called prior to dprc_get_obj(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_get_obj_desc() - Get object descriptor. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: The type of the object to get its descriptor. -+ * @obj_id: The id of the object to get its descriptor -+ * @obj_desc: The returned descriptor to fill and return to the user -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type of the object to set its IRQ -+ * @obj_id: ID of the object to set its IRQ -+ * @irq_index: The interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
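As with the resource pools, objects are enumerated by index. The sketch below assumes an open DPRC token and zero command flags, and simply prints each object together with its plugged state; it is an illustrative usage pattern only.

    #include <stdio.h>
    #include <stdint.h>
    #include "fsl_dprc.h"

    static int list_objects(struct fsl_mc_io *mc_io, uint16_t token)
    {
            struct dprc_obj_desc desc;
            int obj_count, i, err;

            err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
            if (err)
                    return err;

            for (i = 0; i < obj_count; i++) {
                    err = dprc_get_obj(mc_io, 0, token, i, &desc);
                    if (err)
                            return err;
                    printf("%s.%d v%u.%u %s\n", desc.type, desc.id,
                           desc.ver_major, desc.ver_minor,
                           (desc.state & DPRC_OBJ_STATE_PLUGGED) ?
                                   "plugged" : "unplugged");
            }
            return 0;
    }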
-+ */ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_obj_irq() - Get IRQ information from object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type od the object to get its IRQ -+ * @obj_id: ID of the object to get its IRQ -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: The returned IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_res_count() - Obtains the number of free resources that are -+ * assigned to this container, by pool type -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @res_count: Returned number of free resources of the given -+ * resource type that are assigned to this DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count); -+ -+/** -+ * enum dprc_iter_status - Iteration status -+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration -+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed -+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration -+ */ -+enum dprc_iter_status { -+ DPRC_ITER_STATUS_FIRST = 0, -+ DPRC_ITER_STATUS_MORE = 1, -+ DPRC_ITER_STATUS_LAST = 2 -+}; -+ -+/** -+ * struct dprc_res_ids_range_desc - Resource ID range descriptor -+ * @base_id: Base resource ID of this range -+ * @last_id: Last resource ID of this range -+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at -+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, -+ * additional iterations are needed, until the returned marker is -+ * DPRC_ITER_STATUS_LAST -+ */ -+struct dprc_res_ids_range_desc { -+ int base_id; -+ int last_id; -+ enum dprc_iter_status iter_status; -+}; -+ -+/** -+ * dprc_get_res_ids() - Obtains IDs of free resources in the container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @range_desc: range descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc); -+ -+/** -+ * Region flags -+ */ -+/** -+ * Cacheable - Indicates that region should be mapped as cacheable -+ */ -+#define DPRC_REGION_CACHEABLE 0x00000001 -+ -+/** -+ * enum dprc_region_type - Region type -+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region -+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region -+ */ -+enum dprc_region_type { -+ DPRC_REGION_TYPE_MC_PORTAL, -+ DPRC_REGION_TYPE_QBMAN_PORTAL -+}; -+ -+/** -+ * struct dprc_region_desc - Mappable region descriptor -+ * @base_offset: Region offset from region's base address. 
-+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals -+ * base address; For DPIO, region base is offset from SoC QMan portals -+ * base address -+ * @size: Region size (in bytes) -+ * @flags: Region attributes -+ * @type: Portal region type -+ */ -+struct dprc_region_desc { -+ uint32_t base_offset; -+ uint32_t size; -+ uint32_t flags; -+ enum dprc_region_type type; -+}; -+ -+/** -+ * dprc_get_obj_region() - Get region information for a specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object type as returned in dprc_get_obj() -+ * @obj_id: Unique object instance as returned in dprc_get_obj() -+ * @region_index: The specific region to query -+ * @region_desc: Returns the requested region descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc); -+ -+/** -+ * dprc_set_obj_label() - Set object label. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object's type -+ * @obj_id: Object's ID -+ * @label: The required label. The maximum length is 16 chars. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label); -+ -+/** -+ * struct dprc_endpoint - Endpoint description for link connect/disconnect -+ * operations -+ * @type: Endpoint object type: NULL terminated string -+ * @id: Endpoint object ID -+ * @if_id: Interface ID; should be set for endpoints with multiple -+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 -+ */ -+struct dprc_endpoint { -+ char type[16]; -+ int id; -+ uint16_t if_id; -+}; -+ -+/** -+ * struct dprc_connection_cfg - Connection configuration. -+ * Used for virtual connections only -+ * @committed_rate: Committed rate (Mbits/s) -+ * @max_rate: Maximum rate (Mbits/s) -+ */ -+struct dprc_connection_cfg { -+ uint32_t committed_rate; -+ uint32_t max_rate; -+}; -+ -+/** -+ * dprc_connect() - Connect two endpoints to create a network link between them -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint1: Endpoint 1 configuration parameters -+ * @endpoint2: Endpoint 2 configuration parameters -+ * @cfg: Connection configuration. The connection configuration is ignored for -+ * connections made to DPMAC objects, where rate is retrieved from the -+ * MAC configuration. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg); -+ -+/** -+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint: Endpoint configuration parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
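A hypothetical sketch of connecting two endpoints with dprc_connect(), assuming an open DPRC token and zero command flags; the "dpni"/"dpmac" types, the IDs and the helper name are illustrative. Per the description above, the rate configuration is ignored when one endpoint is a DPMAC, so it is left zeroed here.

    #include <string.h>
    #include <stdint.h>
    #include "fsl_dprc.h"

    static int connect_ni_to_mac(struct fsl_mc_io *mc_io, uint16_t token,
                                 int ni_id, int mac_id)
    {
            struct dprc_endpoint ep1, ep2;
            struct dprc_connection_cfg cfg = { .committed_rate = 0, .max_rate = 0 };

            memset(&ep1, 0, sizeof(ep1));
            memset(&ep2, 0, sizeof(ep2));
            strncpy(ep1.type, "dpni", sizeof(ep1.type) - 1);
            ep1.id = ni_id;         /* single-interface object: if_id stays 0 */
            strncpy(ep2.type, "dpmac", sizeof(ep2.type) - 1);
            ep2.id = mac_id;        /* rate cfg is ignored for DPMAC endpoints */

            return dprc_connect(mc_io, 0, token, &ep1, &ep2, &cfg);
    }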
-+ */ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint); -+ -+/** -+* dprc_get_connection() - Get connected endpoint and link status if connection -+* exists. -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPRC object -+* @endpoint1: Endpoint 1 configuration parameters -+* @endpoint2: Returned endpoint 2 configuration parameters -+* @state: Returned link state: -+* 1 - link is up; -+* 0 - link is down; -+* -1 - no connection (endpoint2 information is irrelevant) -+* -+* Return: '0' on Success; -ENAVAIL if connection does not exist. -+*/ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state); -+ -+#endif /* _FSL_DPRC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h -new file mode 100644 -index 0000000..469e286 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h -@@ -0,0 +1,755 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPRC_CMD_H -+#define _FSL_DPRC_CMD_H -+ -+/* DPRC Version */ -+#define DPRC_VER_MAJOR 5 -+#define DPRC_VER_MINOR 1 -+ -+/* Command IDs */ -+#define DPRC_CMDID_CLOSE 0x800 -+#define DPRC_CMDID_OPEN 0x805 -+#define DPRC_CMDID_CREATE 0x905 -+ -+#define DPRC_CMDID_GET_ATTR 0x004 -+#define DPRC_CMDID_RESET_CONT 0x005 -+ -+#define DPRC_CMDID_SET_IRQ 0x010 -+#define DPRC_CMDID_GET_IRQ 0x011 -+#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRC_CMDID_CREATE_CONT 0x151 -+#define DPRC_CMDID_DESTROY_CONT 0x152 -+#define DPRC_CMDID_GET_CONT_ID 0x830 -+#define DPRC_CMDID_SET_RES_QUOTA 0x155 -+#define DPRC_CMDID_GET_RES_QUOTA 0x156 -+#define DPRC_CMDID_ASSIGN 0x157 -+#define DPRC_CMDID_UNASSIGN 0x158 -+#define DPRC_CMDID_GET_OBJ_COUNT 0x159 -+#define DPRC_CMDID_GET_OBJ 0x15A -+#define DPRC_CMDID_GET_RES_COUNT 0x15B -+#define DPRC_CMDID_GET_RES_IDS 0x15C -+#define DPRC_CMDID_GET_OBJ_REG 0x15E -+#define DPRC_CMDID_SET_OBJ_IRQ 0x15F -+#define DPRC_CMDID_GET_OBJ_IRQ 0x160 -+#define DPRC_CMDID_SET_OBJ_LABEL 0x161 -+#define DPRC_CMDID_GET_OBJ_DESC 0x162 -+ -+#define DPRC_CMDID_CONNECT 0x167 -+#define DPRC_CMDID_DISCONNECT 0x168 -+#define DPRC_CMDID_GET_POOL 0x169 -+#define DPRC_CMDID_GET_POOL_COUNT 0x16A -+ -+#define DPRC_CMDID_GET_CONNECTION 0x16C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_CONTAINER_ID(cmd, container_id) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_OPEN(cmd, container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CREATE_CONTAINER(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->icid); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->portal_id); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, cfg->label[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, cfg->label[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, cfg->label[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, cfg->label[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, cfg->label[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, cfg->label[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, cfg->label[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, cfg->label[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, cfg->label[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, cfg->label[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, cfg->label[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, cfg->label[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, cfg->label[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, cfg->label[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, cfg->label[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, cfg->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_CREATE_CONTAINER(cmd, child_container_id, child_portal_offset)\ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, int, child_container_id); \ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, child_portal_offset);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_RESET_CONTAINER(cmd, child_container_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->container_id); \ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->icid); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\ -+ MC_RSP_OP(cmd, 1, 32, 32, int, attr->portal_id); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, quota);\ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 
2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_QUOTA(cmd, quota) \ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, quota) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_CMD_ASSIGN(cmd, container_id, res_req) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\ -+} while (0) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\ -+ 
MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_POOL_COUNT(cmd, pool_count) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, pool_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_POOL(cmd, pool_index) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, pool_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_POOL(cmd, type) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_RSP_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_RSP_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_RSP_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_RSP_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_RSP_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_RSP_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_RSP_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_RSP_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_RSP_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_RSP_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_RSP_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_RSP_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_RSP_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_RSP_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_RSP_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_COUNT(cmd, obj_count) \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ(cmd, obj_index) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ(cmd, obj_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \ -+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \ -+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\ -+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\ -+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\ -+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\ -+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\ -+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\ -+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\ -+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\ -+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\ -+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\ -+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\ -+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\ -+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\ -+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\ -+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\ -+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\ -+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\ -+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\ -+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\ -+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\ -+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\ -+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\ -+ MC_RSP_OP(cmd, 5, 48, 8, char, 
obj_desc->label[6]);\ -+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\ -+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\ -+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\ -+ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\ -+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\ -+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\ -+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\ -+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\ -+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id);\ -+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \ -+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\ -+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \ -+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\ -+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\ -+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\ -+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\ -+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\ -+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\ -+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\ -+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\ -+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\ -+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\ -+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\ -+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\ -+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\ -+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\ -+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\ -+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\ -+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\ -+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\ -+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\ -+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\ -+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\ -+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\ -+ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\ -+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\ -+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\ -+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\ -+ MC_RSP_OP(cmd, 6, 16, 
8, char, obj_desc->label[10]);\ -+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\ -+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\ -+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\ -+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\ -+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_COUNT(cmd, type) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_COUNT(cmd, res_count) \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, res_count) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_RES_IDS(cmd, range_desc, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 42, 7, enum dprc_iter_status, \ -+ range_desc->iter_status); \ -+ MC_CMD_OP(cmd, 1, 0, 32, int, range_desc->base_id); \ -+ MC_CMD_OP(cmd, 1, 32, 32, int, range_desc->last_id);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, type[0]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, type[1]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, type[2]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, type[3]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, type[4]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, type[5]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, type[6]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, type[7]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, type[8]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, type[9]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, type[10]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, type[11]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, type[12]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, type[13]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, type[14]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_RES_IDS(cmd, range_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 42, 7, enum dprc_iter_status, \ -+ range_desc->iter_status);\ -+ MC_RSP_OP(cmd, 1, 0, 32, int, range_desc->base_id); \ -+ MC_RSP_OP(cmd, 1, 32, 32, int, range_desc->last_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, region_index);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, 
obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_REGION(cmd, region_desc) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, region_desc->base_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, region_desc->size); \ -+ MC_RSP_OP(cmd, 2, 32, 4, enum dprc_region_type, region_desc->type);\ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, region_desc->flags);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, label[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, label[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, label[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, label[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, label[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, label[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, label[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, label[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, label[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, label[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, label[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, label[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, label[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, label[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, label[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, label[15]);\ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_SET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\ -+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\ -+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\ -+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\ -+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\ -+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\ -+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\ -+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_OBJ_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, int, endpoint2->id); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, endpoint2->if_id); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[0]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[1]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[2]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[3]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[4]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[5]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[6]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[7]); \ -+ MC_CMD_OP(cmd, 3, 0, 8, char, endpoint1->type[8]); \ -+ MC_CMD_OP(cmd, 3, 8, 8, char, endpoint1->type[9]); \ -+ MC_CMD_OP(cmd, 3, 16, 8, char, endpoint1->type[10]); \ -+ MC_CMD_OP(cmd, 3, 24, 8, char, endpoint1->type[11]); \ -+ MC_CMD_OP(cmd, 3, 32, 8, char, endpoint1->type[12]); \ -+ MC_CMD_OP(cmd, 3, 40, 8, char, endpoint1->type[13]); \ -+ MC_CMD_OP(cmd, 3, 48, 8, char, endpoint1->type[14]); \ -+ MC_CMD_OP(cmd, 3, 56, 8, char, endpoint1->type[15]); \ -+ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, cfg->max_rate); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->committed_rate); \ -+ MC_CMD_OP(cmd, 5, 0, 8, char, endpoint2->type[0]); \ -+ MC_CMD_OP(cmd, 5, 8, 8, char, endpoint2->type[1]); \ -+ MC_CMD_OP(cmd, 5, 16, 8, char, endpoint2->type[2]); \ -+ MC_CMD_OP(cmd, 5, 24, 8, char, endpoint2->type[3]); \ -+ MC_CMD_OP(cmd, 5, 32, 8, char, endpoint2->type[4]); \ -+ MC_CMD_OP(cmd, 5, 40, 8, char, endpoint2->type[5]); \ -+ MC_CMD_OP(cmd, 5, 48, 8, char, endpoint2->type[6]); \ -+ MC_CMD_OP(cmd, 5, 56, 8, char, endpoint2->type[7]); \ -+ MC_CMD_OP(cmd, 6, 0, 8, char, endpoint2->type[8]); \ -+ MC_CMD_OP(cmd, 6, 8, 8, char, endpoint2->type[9]); \ -+ MC_CMD_OP(cmd, 6, 16, 8, char, endpoint2->type[10]); \ -+ MC_CMD_OP(cmd, 6, 24, 8, char, endpoint2->type[11]); \ -+ MC_CMD_OP(cmd, 6, 32, 8, char, endpoint2->type[12]); \ -+ MC_CMD_OP(cmd, 6, 40, 8, char, endpoint2->type[13]); \ -+ MC_CMD_OP(cmd, 6, 48, 8, char, endpoint2->type[14]); \ -+ MC_CMD_OP(cmd, 6, 
56, 8, char, endpoint2->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_DISCONNECT(cmd, endpoint) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint->type[0]); \ -+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint->type[1]); \ -+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint->type[2]); \ -+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint->type[3]); \ -+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint->type[4]); \ -+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint->type[5]); \ -+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint->type[6]); \ -+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint->type[7]); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint->type[8]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint->type[9]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint->type[10]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint->type[11]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint->type[12]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint->type[13]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint->type[14]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_CMD_GET_CONNECTION(cmd, endpoint1) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint1->type[0]); \ -+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint1->type[1]); \ -+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint1->type[2]); \ -+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint1->type[3]); \ -+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint1->type[4]); \ -+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint1->type[5]); \ -+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint1->type[6]); \ -+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint1->type[7]); \ -+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[8]); \ -+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[9]); \ -+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[10]); \ -+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[11]); \ -+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[12]); \ -+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[13]); \ -+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[14]); \ -+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[15]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRC_RSP_GET_CONNECTION(cmd, endpoint2, state) \ -+do { \ -+ MC_RSP_OP(cmd, 3, 0, 32, int, endpoint2->id); \ -+ MC_RSP_OP(cmd, 3, 32, 16, uint16_t, endpoint2->if_id); \ -+ MC_RSP_OP(cmd, 4, 0, 8, char, endpoint2->type[0]); \ -+ MC_RSP_OP(cmd, 4, 8, 8, char, endpoint2->type[1]); \ -+ MC_RSP_OP(cmd, 4, 16, 8, char, endpoint2->type[2]); \ -+ MC_RSP_OP(cmd, 4, 24, 8, char, endpoint2->type[3]); \ -+ MC_RSP_OP(cmd, 4, 32, 8, char, endpoint2->type[4]); \ -+ MC_RSP_OP(cmd, 4, 40, 8, char, endpoint2->type[5]); \ -+ MC_RSP_OP(cmd, 4, 48, 8, char, endpoint2->type[6]); \ -+ MC_RSP_OP(cmd, 4, 56, 8, char, endpoint2->type[7]); \ -+ MC_RSP_OP(cmd, 5, 0, 8, char, endpoint2->type[8]); \ -+ MC_RSP_OP(cmd, 5, 8, 8, char, endpoint2->type[9]); \ -+ MC_RSP_OP(cmd, 5, 16, 8, char, endpoint2->type[10]); \ -+ MC_RSP_OP(cmd, 5, 24, 8, char, endpoint2->type[11]); \ -+ MC_RSP_OP(cmd, 5, 32, 8, char, endpoint2->type[12]); \ -+ MC_RSP_OP(cmd, 5, 40, 8, char, endpoint2->type[13]); \ -+ MC_RSP_OP(cmd, 5, 48, 8, char, endpoint2->type[14]); \ -+ MC_RSP_OP(cmd, 5, 56, 8, char, endpoint2->type[15]); \ -+ MC_RSP_OP(cmd, 6, 0, 32, int, state); \ -+} while (0) -+ -+#endif /* 
_FSL_DPRC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h -new file mode 100644 -index 0000000..cad0693 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprtc.h -@@ -0,0 +1,434 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPRTC_H -+#define __FSL_DPRTC_H -+ -+/* Data Path Real Time Counter API -+ * Contains initialization APIs and runtime control APIs for RTC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Number of irq's -+ */ -+#define DPRTC_MAX_IRQ_NUM 1 -+#define DPRTC_IRQ_INDEX 0 -+ -+/** -+ * Interrupt event masks: -+ */ -+ -+/** -+ * Interrupt event mask indicating alarm event had occurred -+ */ -+#define DPRTC_EVENT_ALARM 0x40000000 -+/** -+ * Interrupt event mask indicating periodic pulse event had occurred -+ */ -+#define DPRTC_EVENT_PPS 0x08000000 -+ -+/** -+ * dprtc_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dprtc_id: DPRTC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dprtc_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
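A minimal, hypothetical sketch of the open/use/close session pattern described here, assuming mc_io was set up earlier and the DPRTC object ID is already known (for example from DPRC object enumeration); dprtc_open() and dprtc_close() follow the prototypes given just below, and the helper name is illustrative.

    #include <stdint.h>
    #include "fsl_dprtc.h"

    static int with_rtc(struct fsl_mc_io *mc_io, int dprtc_id)
    {
            uint16_t token;
            int err;

            err = dprtc_open(mc_io, 0, dprtc_id, &token);
            if (err)
                    return err;

            /* ... issue DPRTC commands using 'token' here ... */

            return dprtc_close(mc_io, 0, token);
    }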
-+ */ -+int dprtc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dprtc_id, -+ uint16_t *token); -+ -+/** -+ * dprtc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dprtc_cfg - Structure representing DPRTC configuration -+ * @options: place holder -+ */ -+struct dprtc_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dprtc_create() - Create the DPRTC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPRTC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dprtc_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dprtc_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dprtc_destroy() - Destroy the DPRTC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dprtc_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dprtc_set_clock_offset() - Sets the clock's offset -+ * (usually relative to another clock). -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @offset: New clock offset (in nanoseconds). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int64_t offset); -+ -+/** -+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @freq_compensation: -+ * The new frequency compensation value to set. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t freq_compensation); -+ -+/** -+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @freq_compensation: -+ * Frequency compensation value -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint32_t *freq_compensation); -+ -+/** -+ * dprtc_get_time() - Returns the current RTC time. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: Current RTC time. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t *time); -+ -+/** -+ * dprtc_set_time() - Updates current RTC time. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: New RTC time. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_time(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time); -+ -+/** -+ * dprtc_set_alarm() - Defines and sets alarm. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @time: In nanoseconds, the time when the alarm -+ * should go off - must be a multiple of -+ * 1 microsecond -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_alarm(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint64_t time); -+ -+/** -+ * struct dprtc_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprtc_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprtc_set_irq() - Set IRQ information for the DPRTC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprtc_irq_cfg *irq_cfg); -+ -+/** -+ * dprtc_get_irq() - Get IRQ information from the DPRTC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprtc_irq_cfg *irq_cfg); -+ -+/** -+ * dprtc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprtc_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprtc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprtc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprtc_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprtc_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprtc_attr - Structure representing DPRTC attributes -+ * @id: DPRTC object ID -+ * @version: DPRTC version -+ */ -+struct dprtc_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPRTC version -+ * @major: DPRTC major version -+ * @minor: DPRTC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprtc_get_attributes - Retrieve DPRTC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRTC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprtc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprtc_attr *attr); -+ -+#endif /* __FSL_DPRTC_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h -new file mode 100644 -index 0000000..aeccece ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h -@@ -0,0 +1,181 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
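dprtc_get_attributes() is the usual way for a driver to discover the object ID and the API version it is talking to before relying on version-specific behaviour. A minimal sketch, assuming an already-open portal and token, cmd_flags 0, and the header name used elsewhere in these examples:

#include <stdint.h>
#include <stdio.h>
#include "fsl_dprtc.h"		/* assumed header name/path */

int example_dprtc_show_version(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dprtc_attr attr;
	int err;

	err = dprtc_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	printf("dprtc.%d API version %u.%u\n",
	       attr.id, attr.version.major, attr.version.minor);
	return 0;
}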
-+ */ -+#ifndef _FSL_DPRTC_CMD_H -+#define _FSL_DPRTC_CMD_H -+ -+/* DPRTC Version */ -+#define DPRTC_VER_MAJOR 1 -+#define DPRTC_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPRTC_CMDID_CLOSE 0x800 -+#define DPRTC_CMDID_OPEN 0x810 -+#define DPRTC_CMDID_CREATE 0x910 -+#define DPRTC_CMDID_DESTROY 0x900 -+ -+#define DPRTC_CMDID_ENABLE 0x002 -+#define DPRTC_CMDID_DISABLE 0x003 -+#define DPRTC_CMDID_GET_ATTR 0x004 -+#define DPRTC_CMDID_RESET 0x005 -+#define DPRTC_CMDID_IS_ENABLED 0x006 -+ -+#define DPRTC_CMDID_SET_IRQ 0x010 -+#define DPRTC_CMDID_GET_IRQ 0x011 -+#define DPRTC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRTC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRTC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRTC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRTC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRTC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRTC_CMDID_SET_CLOCK_OFFSET 0x1d0 -+#define DPRTC_CMDID_SET_FREQ_COMPENSATION 0x1d1 -+#define DPRTC_CMDID_GET_FREQ_COMPENSATION 0x1d2 -+#define DPRTC_CMDID_GET_TIME 0x1d3 -+#define DPRTC_CMDID_SET_TIME 0x1d4 -+#define DPRTC_CMDID_SET_ALARM 0x1d5 -+#define DPRTC_CMDID_SET_PERIODIC_PULSE 0x1d6 -+#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE 0x1d7 -+#define DPRTC_CMDID_SET_EXT_TRIGGER 0x1d8 -+#define DPRTC_CMDID_CLEAR_EXT_TRIGGER 0x1d9 -+#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP 0x1dA -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_OPEN(cmd, dpbp_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_GET_IRQ_STATUS(cmd, 
irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset) \ -+ MC_CMD_OP(cmd, 0, 0, 64, int64_t, offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation) \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, freq_compensation) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, freq_compensation) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, freq_compensation) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_RSP_GET_TIME(cmd, time) \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_TIME(cmd, time) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPRTC_CMD_SET_ALARM(cmd, time) \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time) -+ -+#endif /* _FSL_DPRTC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpseci.h b/drivers/net/dpaa2/mc/fsl_dpseci.h -new file mode 100644 -index 0000000..1dd7215 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpseci.h -@@ -0,0 +1,647 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
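The DPRTC_CMD_*/DPRTC_RSP_* macros above all share the (cmd, param, offset, width, type, arg_name) convention: each command carries a small array of 64-bit parameter words, and every argument is packed into 'width' bits starting at bit 'offset' of word 'param'. The MC_CMD_OP()/MC_RSP_OP() helpers themselves are defined in the MC command header, which is not part of this hunk, so the routines below are only a hypothetical illustration of that packing scheme, not the actual implementation.

#include <stdint.h>

/* Hypothetical sketch of the offset/width field packing used by the
 * command macros; the real MC_CMD_OP()/MC_RSP_OP() may differ in detail.
 */
static inline void cmd_field_set(uint64_t *param, int offset, int width,
				 uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	*param = (*param & ~(mask << offset)) | ((val & mask) << offset);
}

static inline uint64_t cmd_field_get(uint64_t param, int offset, int width)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (param >> offset) & mask;
}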
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPSECI_H -+#define __FSL_DPSECI_H -+ -+/* Data Path SEC Interface API -+ * Contains initialization APIs and runtime control APIs for DPSECI -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * General DPSECI macros -+ */ -+ -+/** -+ * Maximum number of Tx/Rx priorities per DPSECI object -+ */ -+#define DPSECI_PRIO_NUM 8 -+ -+/** -+ * All queues considered; see dpseci_set_rx_queue() -+ */ -+#define DPSECI_ALL_QUEUES (uint8_t)(-1) -+ -+/** -+ * dpseci_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpseci_id: DPSECI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpseci_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpseci_id, -+ uint16_t *token); -+ -+/** -+ * dpseci_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpseci_cfg - Structure representing DPSECI configuration -+ * @num_tx_queues: num of queues towards the SEC -+ * @num_rx_queues: num of queues back from the SEC -+ * @priorities: Priorities for the SEC hardware processing; -+ * each place in the array is the priority of the tx queue -+ * towards the SEC, -+ * valid priorities are configured with values 1-8; -+ */ -+struct dpseci_cfg { -+ uint8_t num_tx_queues; -+ uint8_t num_rx_queues; -+ uint8_t priorities[DPSECI_PRIO_NUM]; -+}; -+ -+/** -+ * dpseci_create() - Create the DPSECI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPSECI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. 
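As with every MC object, runtime use of a DPSECI is bracketed by a control session: dpseci_open() exchanges the object ID for an authentication token, and dpseci_close() releases it. The lifecycle sketch below is illustrative only; it assumes the MC portal was obtained elsewhere, that cmd_flags 0 is acceptable, and that the header name matches the file added by this patch.

#include <stdint.h>
#include "fsl_dpseci.h"		/* assumed header name/path */

int example_dpseci_session(struct fsl_mc_io *mc_io, int dpseci_id)
{
	uint16_t token;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	/* ... issue dpseci_* runtime commands using 'token' ... */

	return dpseci_close(mc_io, 0, token);
}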
For objects that are created using the -+ * DPL file, call dpseci_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpseci_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpseci_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpseci_is_enabled() - Check if the DPSECI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpseci_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpseci_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
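dpseci_create() takes the queue counts and the per-Tx-queue SEC priorities (valid values 1-8) in struct dpseci_cfg and returns a token directly, so a freshly created object can be enabled without a separate open. The sketch below is illustrative only: two queue pairs and the chosen priorities are arbitrary, cmd_flags is 0, and the include name is assumed.

#include <stdint.h>
#include "fsl_dpseci.h"		/* assumed header name/path */

int example_dpseci_create_and_enable(struct fsl_mc_io *mc_io,
				     uint16_t *token)
{
	struct dpseci_cfg cfg = {
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },	/* SEC priority per Tx queue, 1-8 */
	};
	int err, en = 0;

	err = dpseci_create(mc_io, 0, &cfg, token);
	if (err)
		return err;

	err = dpseci_enable(mc_io, 0, *token);
	if (err)
		return err;

	return dpseci_is_enabled(mc_io, 0, *token, &en);	/* expect en == 1 */
}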
-+ */ -+int dpseci_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpseci_irq_cfg *irq_cfg); -+ -+/** -+ * dpseci_get_irq() - Get IRQ information from the DPSECI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpseci_irq_cfg *irq_cfg); -+ -+/** -+ * dpseci_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpseci_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpseci_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpseci_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpseci_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpseci_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpseci_attr - Structure representing DPSECI attributes -+ * @id: DPSECI object ID -+ * @version: DPSECI version -+ * @num_tx_queues: number of queues towards the SEC -+ * @num_rx_queues: number of queues back from the SEC -+ */ -+struct dpseci_attr { -+ int id; -+ /** -+ * struct version - DPSECI version -+ * @major: DPSECI major version -+ * @minor: DPSECI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_tx_queues; -+ uint8_t num_rx_queues; -+}; -+ -+/** -+ * dpseci_get_attributes() - Retrieve DPSECI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpseci_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_attr *attr); -+ -+/** -+ * enum dpseci_dest - DPSECI destination types -+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpseci_dest { -+ DPSECI_DEST_NONE = 0, -+ DPSECI_DEST_DPIO = 1, -+ DPSECI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPSECI_DEST_NONE' option -+ */ -+struct dpseci_dest_cfg { -+ enum dpseci_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/** -+ * DPSECI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPSECI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004 -+ -+/** -+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPSECI_QUEUE_OPT_' flags -+ * @order_preservation_en: order preservation configuration for the rx queue -+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options' -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpseci_rx_queue_cfg { -+ uint32_t options; -+ int order_preservation_en; -+ uint64_t user_ctx; -+ struct dpseci_dest_cfg dest_cfg; -+}; -+ -+/** -+ * dpseci_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation; use -+ * DPSECI_ALL_QUEUES to configure all Rx queues identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
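struct dpseci_rx_queue_cfg is a partial-update structure: only the fields whose DPSECI_QUEUE_OPT_ flag is set in 'options' are applied. The sketch below points every Rx queue at a DPIO for FQDAN notifications and attaches a user context; the DPIO ID and context value are caller-supplied placeholders, DPSECI_ALL_QUEUES configures all queues identically, and cmd_flags is 0 as in the other examples.

#include <stdint.h>
#include "fsl_dpseci.h"		/* assumed header name/path */

int example_dpseci_setup_rx(struct fsl_mc_io *mc_io, uint16_t token,
			    int dpio_id, uint64_t user_ctx)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
		.user_ctx = user_ctx,	/* returned in each dequeued frame descriptor */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}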
-+ */ -+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ const struct dpseci_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @order_preservation_en: Status of the order preservation configuration -+ * on the queue -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpseci_rx_queue_attr { -+ uint64_t user_ctx; -+ int order_preservation_en; -+ struct dpseci_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_rx_queue_attr *attr); -+ -+/** -+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware -+ * @priority: SEC hardware processing priority for the queue -+ */ -+struct dpseci_tx_queue_attr { -+ uint32_t fqid; -+ uint8_t priority; -+}; -+ -+/** -+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @queue: Select the queue relative to number of -+ * priorities configured at DPSECI creation -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t queue, -+ struct dpseci_tx_queue_attr *attr); -+ -+/** -+ * struct dpseci_sec_attr - Structure representing attributes of the SEC -+ * hardware accelerator -+ * @ip_id: ID for SEC. -+ * @major_rev: Major revision number for SEC. -+ * @minor_rev: Minor revision number for SEC. -+ * @era: SEC Era. -+ * @deco_num: The number of copies of the DECO that are implemented in -+ * this version of SEC. -+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented -+ * in this version of SEC. -+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented -+ * in this version of SEC. -+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are -+ * implemented in this version of SEC. -+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are -+ * implemented in this version of SEC. -+ * @crc_acc_num: The number of copies of the CRC module that are implemented -+ * in this version of SEC. -+ * @pk_acc_num: The number of copies of the Public Key module that are -+ * implemented in this version of SEC. -+ * @kasumi_acc_num: The number of copies of the Kasumi module that are -+ * implemented in this version of SEC. -+ * @rng_acc_num: The number of copies of the Random Number Generator that are -+ * implemented in this version of SEC. -+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are -+ * implemented in this version of SEC. 
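Once the queues are configured, a driver usually only needs the virtual FQIDs: the Tx FQID to enqueue frames towards SEC and the Rx FQID to dequeue results. A minimal sketch, under the same assumptions as above (open token, cmd_flags 0, assumed include name):

#include <stdint.h>
#include "fsl_dpseci.h"		/* assumed header name/path */

int example_dpseci_get_fqids(struct fsl_mc_io *mc_io, uint16_t token,
			     uint8_t queue, uint32_t *tx_fqid,
			     uint32_t *rx_fqid)
{
	struct dpseci_tx_queue_attr tx_attr;
	struct dpseci_rx_queue_attr rx_attr;
	int err;

	err = dpseci_get_tx_queue(mc_io, 0, token, queue, &tx_attr);
	if (err)
		return err;
	err = dpseci_get_rx_queue(mc_io, 0, token, queue, &rx_attr);
	if (err)
		return err;

	*tx_fqid = tx_attr.fqid;	/* enqueue towards SEC on this FQID */
	*rx_fqid = rx_attr.fqid;	/* dequeue SEC results from this FQID */
	return 0;
}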
-+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented -+ * in this version of SEC. -+ * @des_acc_num: The number of copies of the DES module that are implemented -+ * in this version of SEC. -+ * @aes_acc_num: The number of copies of the AES module that are implemented -+ * in this version of SEC. -+ **/ -+ -+struct dpseci_sec_attr { -+ uint16_t ip_id; -+ uint8_t major_rev; -+ uint8_t minor_rev; -+ uint8_t era; -+ uint8_t deco_num; -+ uint8_t zuc_auth_acc_num; -+ uint8_t zuc_enc_acc_num; -+ uint8_t snow_f8_acc_num; -+ uint8_t snow_f9_acc_num; -+ uint8_t crc_acc_num; -+ uint8_t pk_acc_num; -+ uint8_t kasumi_acc_num; -+ uint8_t rng_acc_num; -+ uint8_t md_acc_num; -+ uint8_t arc4_acc_num; -+ uint8_t des_acc_num; -+ uint8_t aes_acc_num; -+}; -+ -+/** -+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @attr: Returned SEC attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_attr *attr); -+ -+/** -+ * struct dpseci_sec_counters - Structure representing global SEC counters and -+ * not per dpseci counters -+ * @dequeued_requests: Number of Requests Dequeued -+ * @ob_enc_requests: Number of Outbound Encrypt Requests -+ * @ib_dec_requests: Number of Inbound Decrypt Requests -+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted -+ * @ob_prot_bytes: Number of Outbound Bytes Protected -+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted -+ * @ib_valid_bytes: Number of Inbound Bytes Validated -+ */ -+struct dpseci_sec_counters { -+ uint64_t dequeued_requests; -+ uint64_t ob_enc_requests; -+ uint64_t ib_dec_requests; -+ uint64_t ob_enc_bytes; -+ uint64_t ob_prot_bytes; -+ uint64_t ib_dec_bytes; -+ uint64_t ib_valid_bytes; -+}; -+ -+/** -+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSECI object -+ * @counters: Returned SEC counters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpseci_sec_counters *counters); -+ -+#endif /* __FSL_DPSECI_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h -new file mode 100644 -index 0000000..6c0b96e ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h -@@ -0,0 +1,241 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
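The counters returned by dpseci_get_sec_counters() are global to the SEC block rather than per-DPSECI object, so they are mainly useful for coarse statistics and debugging rather than per-interface accounting. A short, hedged sketch that dumps a few of them:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include "fsl_dpseci.h"		/* assumed header name/path */

int example_dpseci_dump_counters(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpseci_sec_counters c;
	int err;

	err = dpseci_get_sec_counters(mc_io, 0, token, &c);
	if (err)
		return err;

	printf("SEC: %" PRIu64 " requests dequeued, %" PRIu64
	       " bytes encrypted, %" PRIu64 " bytes decrypted\n",
	       c.dequeued_requests, c.ob_enc_bytes, c.ib_dec_bytes);
	return 0;
}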
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPSECI_CMD_H -+#define _FSL_DPSECI_CMD_H -+ -+/* DPSECI Version */ -+#define DPSECI_VER_MAJOR 3 -+#define DPSECI_VER_MINOR 1 -+ -+/* Command IDs */ -+#define DPSECI_CMDID_CLOSE 0x800 -+#define DPSECI_CMDID_OPEN 0x809 -+#define DPSECI_CMDID_CREATE 0x909 -+#define DPSECI_CMDID_DESTROY 0x900 -+ -+#define DPSECI_CMDID_ENABLE 0x002 -+#define DPSECI_CMDID_DISABLE 0x003 -+#define DPSECI_CMDID_GET_ATTR 0x004 -+#define DPSECI_CMDID_RESET 0x005 -+#define DPSECI_CMDID_IS_ENABLED 0x006 -+ -+#define DPSECI_CMDID_SET_IRQ 0x010 -+#define DPSECI_CMDID_GET_IRQ 0x011 -+#define DPSECI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPSECI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPSECI_CMDID_SET_IRQ_MASK 0x014 -+#define DPSECI_CMDID_GET_IRQ_MASK 0x015 -+#define DPSECI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPSECI_CMDID_SET_RX_QUEUE 0x194 -+#define DPSECI_CMDID_GET_RX_QUEUE 0x196 -+#define DPSECI_CMDID_GET_TX_QUEUE 0x197 -+#define DPSECI_CMDID_GET_SEC_ATTR 0x198 -+#define DPSECI_CMDID_GET_SEC_COUNTERS 0x199 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_OPEN(cmd, dpseci_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \ -+ 
MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \ -+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \ -+ MC_RSP_OP(cmd, 5, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, 
attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\ -+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\ -+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\ -+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\ -+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\ -+ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\ -+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\ -+ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\ -+ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\ -+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\ -+} while (0) -+ -+#endif /* _FSL_DPSECI_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpsw.h b/drivers/net/dpaa2/mc/fsl_dpsw.h -new file mode 100644 -index 0000000..9c1bd9d ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpsw.h -@@ -0,0 +1,2164 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPSW_H -+#define __FSL_DPSW_H -+ -+#include -+ -+/* Data Path L2-Switch API -+ * Contains API for handling DPSW topology and functionality -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * DPSW general definitions -+ */ -+ -+/** -+ * Maximum number of traffic class priorities -+ */ -+#define DPSW_MAX_PRIORITIES 8 -+/** -+ * Maximum number of interfaces -+ */ -+#define DPSW_MAX_IF 64 -+ -+/** -+ * dpsw_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpsw_id: DPSW unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpsw_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpsw_id, -+ uint16_t *token); -+ -+/** -+ * dpsw_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW options -+ */ -+ -+/** -+ * Disable flooding -+ */ -+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL -+/** -+ * Disable Multicast -+ */ -+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL -+/** -+ * Support control interface -+ */ -+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL -+/** -+ * Disable flooding metering -+ */ -+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL -+/** -+ * Enable metering -+ */ -+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL -+ -+/** -+ * enum dpsw_component_type - component type of a bridge -+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an -+ * enterprise VLAN bridge or of a Provider Bridge used -+ * to process C-tagged frames -+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a -+ * Provider Bridge -+ * -+ */ -+enum dpsw_component_type { -+ DPSW_COMPONENT_TYPE_C_VLAN = 0, -+ DPSW_COMPONENT_TYPE_S_VLAN -+}; -+ -+/** -+ * struct dpsw_cfg - DPSW configuration -+ * @num_ifs: Number of external and internal interfaces -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpsw_cfg { -+ uint16_t num_ifs; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Enable/Disable DPSW features (bitmap) -+ * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16 -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16 -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @component_type: Indicates the component type of this bridge -+ */ -+ struct { -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ enum dpsw_component_type component_type; -+ } adv; -+}; -+ -+/** -+ * dpsw_create() - Create the DPSW object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPSW object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpsw_open() function to get an authentication -+ * token first -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpsw_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpsw_destroy() - Destroy the DPSW object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; error code otherwise. 
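Everything in struct dpsw_cfg except num_ifs is optional: leaving the 'adv' sub-structure zeroed selects the documented defaults (16 VLANs, 16 FDBs, 1024 FDB entries, 300 s aging, and so on). The sketch below creates a small switch and enables it; the interface count is an arbitrary placeholder, cmd_flags is 0, and the header name is assumed to match the file added in this patch.

#include <stdint.h>
#include <string.h>
#include "fsl_dpsw.h"		/* assumed header name/path */

int example_dpsw_create_and_enable(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpsw_cfg cfg;
	int err;

	memset(&cfg, 0, sizeof(cfg));	/* zeroed 'adv' = documented defaults */
	cfg.num_ifs = 4;		/* placeholder interface count */

	err = dpsw_create(mc_io, 0, &cfg, token);
	if (err)
		return err;

	return dpsw_enable(mc_io, 0, *token);
}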
-+ */ -+int dpsw_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_enable() - Enable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_disable() - Disable DPSW functionality -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpsw_is_enabled() - Check if the DPSW is enabled -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpsw_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpsw_reset() - Reset the DPSW, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPSW IRQ Index and Events -+ */ -+ -+#define DPSW_IRQ_INDEX_IF 0x0000 -+#define DPSW_IRQ_INDEX_L2SW 0x0001 -+ -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 -+ -+/** -+ * struct dpsw_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpsw_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_get_irq() - Get IRQ information from the DPSW -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpsw_irq_cfg *irq_cfg); -+ -+/** -+ * dpsw_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpsw_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned Interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpsw_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpsw_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpsw_get_irq_status() - Get the current status of any pending interrupts -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpsw_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+/** -+ * struct dpsw_attr - Structure representing DPSW attributes -+ * @id: DPSW object ID -+ * @version: DPSW version -+ * @options: Enable/Disable DPSW features -+ * @max_vlans: Maximum Number of VLANs -+ * @max_meters_per_if: Number of meters per interface -+ * @max_fdbs: Maximum Number of FDBs -+ * @max_fdb_entries: Number of FDB entries for default FDB table; -+ * 0 - indicates default 1024 entries. -+ * @fdb_aging_time: Default FDB aging time for default FDB table; -+ * 0 - indicates default 300 seconds -+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table; -+ * 0 - indicates default 32 -+ * @mem_size: DPSW frame storage memory size -+ * @num_ifs: Number of interfaces -+ * @num_vlans: Current number of VLANs -+ * @num_fdbs: Current number of FDBs -+ * @component_type: Component type of this bridge -+ */ -+struct dpsw_attr { -+ int id; -+ /** -+ * struct version - DPSW version -+ * @major: DPSW major version -+ * @minor: DPSW minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t options; -+ uint16_t max_vlans; -+ uint8_t max_meters_per_if; -+ uint8_t max_fdbs; -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ uint16_t max_fdb_mc_groups; -+ uint16_t num_ifs; -+ uint16_t mem_size; -+ uint16_t num_vlans; -+ uint8_t num_fdbs; -+ enum dpsw_component_type component_type; -+}; -+ -+/** -+ * dpsw_get_attributes() - Retrieve DPSW attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @attr: Returned DPSW attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_attr *attr); -+ -+/** -+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Id -+ * -+ * Only one reflection receive interface is allowed per switch -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * enum dpsw_action - Action selection for special/control frames -+ * @DPSW_ACTION_DROP: Drop frame -+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port -+ */ -+enum dpsw_action { -+ DPSW_ACTION_DROP = 0, -+ DPSW_ACTION_REDIRECT = 1 -+}; -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpsw_link_cfg - Structure representing DPSW link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ */ -+struct dpsw_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpsw_if_set_link_cfg() - set the link configuration. 
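Per-interface link management mirrors the other DPAA2 objects: the requested rate and DPSW_LINK_OPT_ bits go in through dpsw_if_set_link_cfg(), and the negotiated result comes back through dpsw_if_get_link_state(), declared just below. An illustrative sketch follows; the rate value of 1000 is only a placeholder (the units expected by the MC firmware are not stated in this header), autonegotiation is an arbitrary choice, and cmd_flags is 0.

#include <stdint.h>
#include "fsl_dpsw.h"		/* assumed header name/path */

int example_dpsw_if_link(struct fsl_mc_io *mc_io, uint16_t token,
			 uint16_t if_id, int *link_up)
{
	struct dpsw_link_cfg cfg = {
		.rate = 1000,			/* placeholder rate value */
		.options = DPSW_LINK_OPT_AUTONEG,
	};
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
	if (err)
		return err;

	err = dpsw_if_get_link_state(mc_io, 0, token, if_id, &state);
	if (err)
		return err;

	*link_up = state.up;	/* 1 = up, 0 = down or disconnected */
	return 0;
}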
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_cfg *cfg); -+/** -+ * struct dpsw_link_state - Structure representing DPSW link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPSW_LINK_OPT_' values -+ * @up: 0 - covers two cases: down and disconnected, 1 - up -+ */ -+struct dpsw_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpsw_if_get_link_state() - Return the link state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: interface id -+ * @state: link state 1 - linkup, 0 - link down or disconnected -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_link_state *state); -+ -+/** -+ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @en: 1 - enable, 0 - disable -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int en); -+ -+/** -+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration -+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers -+ * to the IEEE 802.1p priority -+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used -+ * separately or in conjunction with PCP to indicate frames -+ * eligible to be dropped in the presence of congestion -+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN -+ * to which the frame belongs.
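A sketch of how the link and per-interface enables above might be driven. The open DPSW handle, the cmd_flags value of 0, and the rate number are assumptions for illustration only.

#include "fsl_dpsw.h"

/* Configure the link, turn on flooding/broadcast and report link state. */
static int bring_up_if(struct fsl_mc_io *mc_io, uint16_t token, uint16_t if_id)
{
	struct dpsw_link_cfg cfg = {
		.rate = 1000,                   /* example rate value */
		.options = DPSW_LINK_OPT_AUTONEG,
	};
	struct dpsw_link_state state = { 0 };
	int err;

	err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
	if (!err)
		err = dpsw_if_set_flooding(mc_io, 0, token, if_id, 1);
	if (!err)
		err = dpsw_if_set_broadcast(mc_io, 0, token, if_id, 1);
	if (!err)
		err = dpsw_if_get_link_state(mc_io, 0, token, if_id, &state);
	return err ? err : !state.up;   /* non-zero if the link is not up */
}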
The hexadecimal values -+ * of 0x000 and 0xFFF are reserved; -+ * all other values may be used as VLAN identifiers, -+ * allowing up to 4,094 VLANs -+ */ -+struct dpsw_tci_cfg { -+ uint8_t pcp; -+ uint8_t dei; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tci_cfg *cfg); -+ -+/** -+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Tag Control Information Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_tci_cfg *cfg); -+ -+/** -+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states -+ * @DPSW_STP_STATE_BLOCKING: Blocking state -+ * @DPSW_STP_STATE_LISTENING: Listening state -+ * @DPSW_STP_STATE_LEARNING: Learning state -+ * @DPSW_STP_STATE_FORWARDING: Forwarding state -+ * -+ */ -+enum dpsw_stp_state { -+ DPSW_STP_STATE_BLOCKING = 0, -+ DPSW_STP_STATE_LISTENING = 1, -+ DPSW_STP_STATE_LEARNING = 2, -+ DPSW_STP_STATE_FORWARDING = 3 -+}; -+ -+/** -+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration -+ * @vlan_id: VLAN ID STP state -+ * @state: STP state -+ */ -+struct dpsw_stp_cfg { -+ uint16_t vlan_id; -+ enum dpsw_stp_state state; -+}; -+ -+/** -+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: STP State configuration parameters -+ * -+ * The following STP states are supported - -+ * blocking, listening, learning, forwarding and disabled. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_stp_cfg *cfg); -+ -+/** -+ * enum dpsw_accepted_frames - Types of frames to accept -+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and -+ * priority tagged frames -+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or -+ * Priority-Tagged frames received on this interface. 
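A short sketch combining the TCI and STP calls declared above: give an interface a default VLAN tag and put that VLAN into forwarding state. The helper name and the values are illustrative, and an already-open DPSW handle is assumed.

#include "fsl_dpsw.h"

static int set_port_defaults(struct fsl_mc_io *mc_io, uint16_t token,
			     uint16_t if_id, uint16_t vlan_id)
{
	struct dpsw_tci_cfg tci = {
		.pcp = 0,
		.dei = 0,
		.vlan_id = vlan_id,     /* used as the interface's default VLAN */
	};
	struct dpsw_stp_cfg stp = {
		.vlan_id = vlan_id,
		.state = DPSW_STP_STATE_FORWARDING,
	};
	int err;

	err = dpsw_if_set_tci(mc_io, 0, token, if_id, &tci);
	if (!err)
		err = dpsw_if_set_stp(mc_io, 0, token, if_id, &stp);
	return err;
}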
-+ * -+ */ -+enum dpsw_accepted_frames { -+ DPSW_ADMIT_ALL = 1, -+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 -+}; -+ -+/** -+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration -+ * @type: Defines ingress accepted frames -+ * @unaccept_act: When a frame is not accepted, it may be discarded or -+ * redirected to control interface depending on this mode -+ */ -+struct dpsw_accepted_frames_cfg { -+ enum dpsw_accepted_frames type; -+ enum dpsw_action unaccept_act; -+}; -+ -+/** -+ * dpsw_if_set_accepted_frames() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Frame types configuration -+ * -+ * When is admit_only_vlan_tagged- the device will discard untagged -+ * frames or Priority-Tagged frames received on this interface. -+ * When admit_only_untagged- untagged frames or Priority-Tagged -+ * frames received on this interface will be accepted and assigned -+ * to a VID based on the PVID and VID Set for this interface. -+ * When admit_all - the device will accept VLAN tagged, untagged -+ * and priority tagged frames. -+ * The default is admit_all -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_accepted_frames_cfg *cfg); -+ -+/** -+ * dpsw_if_set_accept_all_vlan() -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @accept_all: Accept or drop frames having different VLAN -+ * -+ * When this is accept (FALSE), the device will discard incoming -+ * frames for VLANs that do not include this interface in its -+ * Member set. When accept (TRUE), the interface will accept all incoming frames -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
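As a sketch of the accepted-frames configuration just described, the following hypothetical helper admits only VLAN-tagged traffic on an interface and redirects everything else to the control interface (same assumptions as the earlier sketches).

#include "fsl_dpsw.h"

static int admit_tagged_only(struct fsl_mc_io *mc_io, uint16_t token,
			     uint16_t if_id)
{
	struct dpsw_accepted_frames_cfg cfg = {
		.type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPSW_ACTION_REDIRECT,
	};

	return dpsw_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}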
-+ */ -+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ int accept_all); -+ -+/** -+ * enum dpsw_counter - Counters types -+ * @DPSW_CNT_ING_FRAME: Counts ingress frames -+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes -+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames -+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame -+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPSW_CNT_EGR_FRAME: Counts egress frames -+ * @DPSW_CNT_EGR_BYTE: Counts eEgress bytes -+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames -+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames -+ */ -+enum dpsw_counter { -+ DPSW_CNT_ING_FRAME = 0x0, -+ DPSW_CNT_ING_BYTE = 0x1, -+ DPSW_CNT_ING_FLTR_FRAME = 0x2, -+ DPSW_CNT_ING_FRAME_DISCARD = 0x3, -+ DPSW_CNT_ING_MCAST_FRAME = 0x4, -+ DPSW_CNT_ING_MCAST_BYTE = 0x5, -+ DPSW_CNT_ING_BCAST_FRAME = 0x6, -+ DPSW_CNT_ING_BCAST_BYTES = 0x7, -+ DPSW_CNT_EGR_FRAME = 0x8, -+ DPSW_CNT_EGR_BYTE = 0x9, -+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa, -+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb -+}; -+ -+/** -+ * dpsw_if_get_counter() - Get specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: return value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t *counter); -+ -+/** -+ * dpsw_if_set_counter() - Set specific counter of particular interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @type: Counter type -+ * @counter: New counter value -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ enum dpsw_counter type, -+ uint64_t counter); -+ -+/** -+ * Maximum number of TC -+ */ -+#define DPSW_MAX_TC 8 -+ -+/** -+ * enum dpsw_priority_selector - User priority -+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which -+ * refers to the IEEE 802.1p priority. 
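A small sketch reading a few of the per-interface counters enumerated above; counter selection and the helper name are illustrative.

#include <stdio.h>
#include "fsl_dpsw.h"

/* Dump ingress/egress frame and discard counters of one interface. */
static int dump_if_counters(struct fsl_mc_io *mc_io, uint16_t token,
			    uint16_t if_id)
{
	static const enum dpsw_counter types[] = {
		DPSW_CNT_ING_FRAME, DPSW_CNT_ING_FRAME_DISCARD,
		DPSW_CNT_EGR_FRAME, DPSW_CNT_EGR_FRAME_DISCARD,
	};
	uint64_t val;
	size_t i;
	int err;

	for (i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		err = dpsw_if_get_counter(mc_io, 0, token, if_id,
					  types[i], &val);
		if (err)
			return err;
		printf("if %u counter %d = %llu\n", if_id, (int)types[i],
		       (unsigned long long)val);
	}
	return 0;
}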
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit -+ * field from IP header -+ * -+ */ -+enum dpsw_priority_selector { -+ DPSW_UP_PCP = 0, -+ DPSW_UP_DSCP = 1 -+}; -+ -+/** -+ * enum dpsw_schedule_mode - Traffic classes scheduling -+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority -+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm -+ */ -+enum dpsw_schedule_mode { -+ DPSW_SCHED_STRICT_PRIORITY, -+ DPSW_SCHED_WEIGHTED -+}; -+ -+/** -+ * struct dpsw_tx_schedule_cfg - traffic class configuration -+ * @mode: Strict or weight-based scheduling -+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000 -+ */ -+struct dpsw_tx_schedule_cfg { -+ enum dpsw_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic -+ * class configuration -+ * @priority_selector: Source for user priority regeneration -+ * @tc_id: The Regenerated User priority that the incoming -+ * User Priority is mapped to for this interface -+ * @tc_sched: Traffic classes configuration -+ */ -+struct dpsw_tx_selection_cfg { -+ enum dpsw_priority_selector priority_selector; -+ uint8_t tc_id[DPSW_MAX_PRIORITIES]; -+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC]; -+}; -+ -+/** -+ * dpsw_if_set_tx_selection() - Function is used for mapping variety -+ * of frame fields -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Traffic class mapping configuration -+ * -+ * Function is used for mapping variety of frame fields (DSCP, PCP) -+ * to Traffic Class. Traffic class is a number -+ * in the range from 0 to 7 -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpsw_reflection_filter - Filter type for frames to reflect -+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames -+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to -+ * particular VLAN defined by vid parameter -+ * -+ */ -+enum dpsw_reflection_filter { -+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, -+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 -+}; -+ -+/** -+ * struct dpsw_reflection_cfg - Structure representing reflection information -+ * @filter: Filter type for frames to reflect -+ * @vlan_id: Vlan Id to reflect; valid only when filter type is -+ * DPSW_INGRESS_VLAN -+ */ -+struct dpsw_reflection_cfg { -+ enum dpsw_reflection_filter filter; -+ uint16_t vlan_id; -+}; -+ -+/** -+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Reflection configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_reflection_cfg *cfg); -+ -+/** -+ * enum dpsw_metering_mode - Metering modes -+ * @DPSW_METERING_MODE_NONE: metering disabled -+ * @DPSW_METERING_MODE_RFC2698: RFC 2698 -+ * @DPSW_METERING_MODE_RFC4115: RFC 4115 -+ */ -+enum dpsw_metering_mode { -+ DPSW_METERING_MODE_NONE = 0, -+ DPSW_METERING_MODE_RFC2698, -+ DPSW_METERING_MODE_RFC4115 -+}; -+ -+/** -+ * enum dpsw_metering_unit - Metering count -+ * @DPSW_METERING_UNIT_BYTES: count bytes -+ * @DPSW_METERING_UNIT_FRAMES: count frames -+ */ -+enum dpsw_metering_unit { -+ DPSW_METERING_UNIT_BYTES = 0, -+ DPSW_METERING_UNIT_FRAMES -+}; -+ -+/** -+ * struct dpsw_metering_cfg - Metering configuration -+ * @mode: metering modes -+ * @units: Bytes or frame units -+ * @cir: Committed information rate (CIR) in Kbits/s -+ * @eir: Peak information rate (PIR) Kbit/s rfc2698 -+ * Excess information rate (EIR) Kbit/s rfc4115 -+ * @cbs: Committed burst size (CBS) in bytes -+ * @ebs: Peak burst size (PBS) in bytes for rfc2698 -+ * Excess bust size (EBS) in bytes rfc4115 -+ * -+ */ -+struct dpsw_metering_cfg { -+ enum dpsw_metering_mode mode; -+ enum dpsw_metering_unit units; -+ uint32_t cir; -+ uint32_t eir; -+ uint32_t cbs; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpsw_if_set_flooding_metering() - Set flooding metering -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * dpsw_if_set_metering() - Set interface metering for flooding -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class ID -+ * @cfg: Metering parameters -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
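A sketch of the flooding-metering call using the RFC 2698 mode described above; the rates and burst sizes are arbitrary example numbers, not recommended settings.

#include "fsl_dpsw.h"

/* Rate-limit flooded traffic on 'if_id' with a two-rate byte meter. */
static int limit_flooding(struct fsl_mc_io *mc_io, uint16_t token,
			  uint16_t if_id)
{
	struct dpsw_metering_cfg meter = {
		.mode  = DPSW_METERING_MODE_RFC2698,
		.units = DPSW_METERING_UNIT_BYTES,
		.cir   = 10 * 1000,     /* committed rate, Kbit/s */
		.eir   = 20 * 1000,     /* peak rate, Kbit/s */
		.cbs   = 64 * 1024,     /* committed burst, bytes */
		.ebs   = 128 * 1024,    /* peak burst, bytes */
	};

	return dpsw_if_set_flooding_metering(mc_io, 0, token, if_id, &meter);
}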
-+ */ -+int dpsw_if_set_metering(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ const struct dpsw_metering_cfg *cfg); -+ -+/** -+ * enum dpsw_early_drop_unit - DPSW early drop unit -+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes -+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames -+ */ -+enum dpsw_early_drop_unit { -+ DPSW_EARLY_DROP_UNIT_BYTE = 0, -+ DPSW_EARLY_DROP_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpsw_early_drop_mode - DPSW early drop mode -+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpsw_early_drop_mode { -+ DPSW_EARLY_DROP_MODE_NONE = 0, -+ DPSW_EARLY_DROP_MODE_TAIL, -+ DPSW_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpsw_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the maximum threshold) -+ */ -+struct dpsw_wred_cfg { -+ uint64_t min_threshold; -+ uint64_t max_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpsw_early_drop_cfg - early-drop configuration -+ * @drop_mode: drop mode -+ * @units: count units -+ * @yellow: WRED - 'yellow' configuration -+ * @green: WRED - 'green' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpsw_early_drop_cfg { -+ enum dpsw_early_drop_mode drop_mode; -+ enum dpsw_early_drop_unit units; -+ struct dpsw_wred_cfg yellow; -+ struct dpsw_wred_cfg green; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpsw_if_tc_set_early_drop -+ * -+ */ -+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop -+ * configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 64 bytes; -+ * Must be cacheline-aligned and DMA-able memory -+ * -+ * warning: Before calling this function, call dpsw_prepare_if_tc_early_drop() -+ * to prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier -+ * @tpid: An additional tag protocol identifier -+ */ -+struct dpsw_custom_tpid_cfg { -+ uint16_t tpid; -+}; -+ -+/** -+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * API Configures a distinct Ethernet type value (or TPID value) -+ * to indicate a VLAN tag in addition to the common -+ * TPID values 0x8100 and 0x88A8. -+ * Two additional TPID's are supported -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @cfg: Tag Protocol identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_custom_tpid_cfg *cfg); -+ -+/** -+ * dpsw_if_enable() - Enable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * dpsw_if_disable() - Disable Interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
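A sketch of the two-step early-drop flow described above: the caller supplies a zeroed 256-byte buffer plus the I/O virtual address it is mapped at (how that DMA mapping is obtained is platform specific and not shown), dpsw_prepare_early_drop() serializes the configuration into the buffer, and dpsw_if_set_early_drop() hands the address to the MC. Thresholds are example values.

#include "fsl_dpsw.h"

static int set_tc_wred(struct fsl_mc_io *mc_io, uint16_t token,
		       uint16_t if_id, uint8_t tc_id,
		       uint8_t *buf, uint64_t buf_iova)
{
	struct dpsw_early_drop_cfg cfg = {
		.drop_mode = DPSW_EARLY_DROP_MODE_WRED,
		.units = DPSW_EARLY_DROP_UNIT_BYTE,
		.green  = { .min_threshold = 32 * 1024,
			    .max_threshold = 64 * 1024,
			    .drop_probability = 10 },
		.yellow = { .min_threshold = 16 * 1024,
			    .max_threshold = 32 * 1024,
			    .drop_probability = 50 },
	};

	dpsw_prepare_early_drop(&cfg, buf);	/* serialize cfg into buf */
	return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, buf_iova);
}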
-+ */ -+int dpsw_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id); -+ -+/** -+ * struct dpsw_if_attr - Structure representing DPSW interface attributes -+ * @num_tcs: Number of traffic classes -+ * @rate: Transmit rate in bits per second -+ * @options: Interface configuration options (bitmap) -+ * @enabled: Indicates if interface is enabled -+ * @accept_all_vlan: The device discards/accepts incoming frames -+ * for VLANs that do not include this interface -+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device -+ * discards untagged frames or priority-tagged frames received on -+ * this interface; -+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- -+ * tagged frames received on this interface are accepted -+ * @qdid: control frames transmit qdid -+ */ -+struct dpsw_if_attr { -+ uint8_t num_tcs; -+ uint32_t rate; -+ uint32_t options; -+ int enabled; -+ int accept_all_vlan; -+ enum dpsw_accepted_frames admit_untagged; -+ uint16_t qdid; -+}; -+ -+/** -+ * dpsw_if_get_attributes() - Function obtains attributes of interface -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @attr: Returned interface attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ struct dpsw_if_attr *attr); -+ -+/** -+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t frame_length); -+ -+/** -+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @if_id: Interface Identifier -+ * @frame_length: Returned maximum Frame Length -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t if_id, -+ uint16_t *frame_length); -+ -+/** -+ * struct dpsw_vlan_cfg - VLAN Configuration -+ * @fdb_id: Forwarding Data Base -+ */ -+struct dpsw_vlan_cfg { -+ uint16_t fdb_id; -+}; -+ -+/** -+ * dpsw_vlan_add() - Adding new VLAN to DPSW. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: VLAN configuration -+ * -+ * Only VLAN ID and FDB ID are required parameters here. -+ * 12 bit VLAN ID is defined in IEEE802.1Q. -+ * Adding a duplicate VLAN ID is not allowed. -+ * FDB ID can be shared across multiple VLANs. Shared learning -+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs -+ * with same fdb_id -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_vlan_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_cfg *cfg); -+ -+/** -+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces -+ * @num_ifs: The number of interfaces that are assigned to the egress -+ * list for this VLAN -+ * @if_id: The set of interfaces that are -+ * assigned to the egress list for this VLAN -+ */ -+struct dpsw_vlan_if_cfg { -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces to add -+ * -+ * It adds only interfaces not belonging to this VLAN yet, -+ * otherwise an error is generated and an entire command is -+ * ignored. This function can be called numerous times always -+ * providing required interfaces delta. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be -+ * transmitted as untagged. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces that should be transmitted as untagged -+ * -+ * These interfaces should already belong to this VLAN. -+ * By default all interfaces are transmitted as tagged. -+ * Providing un-existing interface or untagged interface that is -+ * configured untagged already generates an error and the entire -+ * command is ignored. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be -+ * included in flooding when frame with unknown destination -+ * unicast MAC arrived. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces that should be used for flooding -+ * -+ * These interfaces should belong to this VLAN. By default all -+ * interfaces are included into flooding list. Providing -+ * un-existing interface or an interface that already in the -+ * flooding list generates an error and the entire command is -+ * ignored. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Set of interfaces that should be removed -+ * -+ * Interfaces must belong to this VLAN, otherwise an error -+ * is returned and an the command is ignored -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
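A sketch of the VLAN calls above: create a VLAN backed by an existing FDB, attach two interfaces to its egress list, and transmit untagged on the second one. Interface numbers are illustrative and the DPSW handle is assumed to be open already.

#include "fsl_dpsw.h"

static int add_vlan_with_ports(struct fsl_mc_io *mc_io, uint16_t token,
			       uint16_t vlan_id, uint16_t fdb_id)
{
	struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
	struct dpsw_vlan_if_cfg ports = { .num_ifs = 2, .if_id = { 1, 2 } };
	struct dpsw_vlan_if_cfg untagged = { .num_ifs = 1, .if_id = { 2 } };
	int err;

	err = dpsw_vlan_add(mc_io, 0, token, vlan_id, &vcfg);
	if (!err)
		err = dpsw_vlan_add_if(mc_io, 0, token, vlan_id, &ports);
	if (!err)
		err = dpsw_vlan_add_if_untagged(mc_io, 0, token, vlan_id,
						&untagged);
	return err;
}

Sharing the same fdb_id across several dpsw_vlan_add() calls is what the comment above calls shared learning.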
-+ */ -+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be -+ * converted from transmitted as untagged to transmit as tagged. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces that should be removed -+ * -+ * Interfaces provided by API have to belong to this VLAN and -+ * configured untagged, otherwise an error is returned and the -+ * command is ignored -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be -+ * removed from the flooding list. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: set of interfaces used for flooding -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ const struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_remove() - Remove an entire VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * struct dpsw_vlan_attr - VLAN attributes -+ * @fdb_id: Associated FDB ID -+ * @num_ifs: Number of interfaces -+ * @num_untagged_ifs: Number of untagged interfaces -+ * @num_flooding_ifs: Number of flooding interfaces -+ */ -+struct dpsw_vlan_attr { -+ uint16_t fdb_id; -+ uint16_t num_ifs; -+ uint16_t num_untagged_ifs; -+ uint16_t num_flooding_ifs; -+}; -+ -+/** -+ * dpsw_vlan_get_attributes() - Get VLAN attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @attr: Returned DPSW attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_attr *attr); -+ -+/** -+ * dpsw_vlan_get_if() - Get interfaces belong to this VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of interfaces belong to this VLAN -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of flooding interfaces -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as -+ * untagged -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @vlan_id: VLAN Identifier -+ * @cfg: Returned set of untagged interfaces -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id, -+ struct dpsw_vlan_if_cfg *cfg); -+ -+/** -+ * struct dpsw_fdb_cfg - FDB Configuration -+ * @num_fdb_entries: Number of FDB entries -+ * @fdb_aging_time: Aging time in seconds -+ */ -+struct dpsw_fdb_cfg { -+ uint16_t num_fdb_entries; -+ uint16_t fdb_aging_time; -+}; -+ -+/** -+ * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for -+ * the reference -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Returned Forwarding Database Identifier -+ * @cfg: FDB Configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *fdb_id, -+ const struct dpsw_fdb_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove() - Remove FDB from switch -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id); -+ -+/** -+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic -+ * @DPSW_FDB_ENTRY_STATIC: Static entry -+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry -+ */ -+enum dpsw_fdb_entry_type { -+ DPSW_FDB_ENTRY_STATIC = 0, -+ DPSW_FDB_ENTRY_DINAMIC = 1 -+}; -+ -+/** -+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration -+ * @type: Select static or dynamic entry -+ * @mac_addr: MAC address -+ * @if_egress: Egress interface ID -+ */ -+struct dpsw_fdb_unicast_cfg { -+ enum dpsw_fdb_entry_type type; -+ uint8_t mac_addr[6]; -+ uint16_t if_egress; -+}; -+ -+/** -+ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by -+ * unicast Ethernet address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Unicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_unicast_cfg *cfg); -+ -+/** -+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration -+ * @type: Select static or dynamic entry -+ * @mac_addr: MAC address -+ * @num_ifs: Number of external and internal interfaces -+ * @if_id: Egress interface IDs -+ */ -+struct dpsw_fdb_multicast_cfg { -+ enum dpsw_fdb_entry_type type; -+ uint8_t mac_addr[6]; -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Multicast entry configuration -+ * -+ * If group doesn't exist, it will be created. -+ * It adds only interfaces not belonging to this multicast group -+ * yet, otherwise error will be generated and the command is -+ * ignored. -+ * This function may be called numerous times always providing -+ * required interfaces delta. -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet -+ * address. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Returned multicast entry configuration -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast -+ * group. 
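A sketch of populating an FDB with the entry types defined above: one static unicast entry and one multicast group. The MAC addresses and interface IDs are made-up examples.

#include "fsl_dpsw.h"

static int seed_fdb(struct fsl_mc_io *mc_io, uint16_t token, uint16_t fdb_id)
{
	struct dpsw_fdb_unicast_cfg uc = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		.if_egress = 3,
	};
	struct dpsw_fdb_multicast_cfg mcast = {
		.type = DPSW_FDB_ENTRY_STATIC,
		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		.num_ifs = 2,
		.if_id = { 1, 2 },
	};
	int err;

	err = dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &uc);
	if (!err)
		err = dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mcast);
	return err;
}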
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @cfg: Multicast entry configuration -+ * -+ * Interfaces provided by this API have to exist in the group, -+ * otherwise an error will be returned and the entire command -+ * ignored. If there is no interface left in the group, -+ * the entire group is deleted -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ const struct dpsw_fdb_multicast_cfg *cfg); -+ -+/** -+ * enum dpsw_fdb_learning_mode - Auto-learning modes -+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning -+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-learning -+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU -+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU -+ * -+ * NON-SECURE LEARNING -+ * SMAC found DMAC found CTLU Action -+ * v v Forward frame to -+ * 1. DMAC destination -+ * - v Forward frame to -+ * 1. DMAC destination -+ * 2. Control interface -+ * v - Forward frame to -+ * 1. Flooding list of interfaces -+ * - - Forward frame to -+ * 1. Flooding list of interfaces -+ * 2. Control interface -+ * SECURE LEARNING -+ * SMAC found DMAC found CTLU Action -+ * v v Forward frame to -+ * 1. DMAC destination -+ * - v Forward frame to -+ * 1. Control interface -+ * v - Forward frame to -+ * 1. Flooding list of interfaces -+ * - - Forward frame to -+ * 1. Control interface -+ */ -+enum dpsw_fdb_learning_mode { -+ DPSW_FDB_LEARNING_MODE_DIS = 0, -+ DPSW_FDB_LEARNING_MODE_HW = 1, -+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, -+ DPSW_FDB_LEARNING_MODE_SECURE = 3 -+}; -+ -+/** -+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @mode: learning mode -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. -+ */ -+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ enum dpsw_fdb_learning_mode mode); -+ -+/** -+ * struct dpsw_fdb_attr - FDB Attributes -+ * @max_fdb_entries: Number of FDB entries -+ * @fdb_aging_time: Aging time in seconds -+ * @learning_mode: Learning mode -+ * @num_fdb_mc_groups: Current number of multicast groups -+ * @max_fdb_mc_groups: Maximum number of multicast groups -+ */ -+struct dpsw_fdb_attr { -+ uint16_t max_fdb_entries; -+ uint16_t fdb_aging_time; -+ enum dpsw_fdb_learning_mode learning_mode; -+ uint16_t num_fdb_mc_groups; -+ uint16_t max_fdb_mc_groups; -+}; -+ -+/** -+ * dpsw_fdb_get_attributes() - Get FDB attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @fdb_id: Forwarding Database Identifier -+ * @attr: Returned FDB attributes -+ * -+ * Return: Completion status. '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t fdb_id, -+ struct dpsw_fdb_attr *attr); -+ -+/** -+ * struct dpsw_acl_cfg - ACL Configuration -+ * @max_entries: Number of FDB entries -+ */ -+struct dpsw_acl_cfg { -+ uint16_t max_entries; -+}; -+ -+/** -+ * struct dpsw_acl_fields - ACL fields. -+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast, -+ * slow protocols, MVRP, STP -+ * @l2_source_mac: Source MAC address -+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following -+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae, -+ * Q-in-Q, IPv4, IPv6, PPPoE -+ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload -+ * @l2_vlan_id: layer 2 VLAN ID -+ * @l2_ether_type: layer 2 Ethernet type -+ * @l3_dscp: Layer 3 differentiated services code point -+ * @l3_protocol: Tells the Network layer at the destination host, to which -+ * Protocol this packet belongs to. The following protocol are -+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6 -+ * (encapsulation), GRE, PTP -+ * @l3_source_ip: Source IPv4 IP -+ * @l3_dest_ip: Destination IPv4 IP -+ * @l4_source_port: Source TCP/UDP Port -+ * @l4_dest_port: Destination TCP/UDP Port -+ */ -+struct dpsw_acl_fields { -+ uint8_t l2_dest_mac[6]; -+ uint8_t l2_source_mac[6]; -+ uint16_t l2_tpid; -+ uint8_t l2_pcp_dei; -+ uint16_t l2_vlan_id; -+ uint16_t l2_ether_type; -+ uint8_t l3_dscp; -+ uint8_t l3_protocol; -+ uint32_t l3_source_ip; -+ uint32_t l3_dest_ip; -+ uint16_t l4_source_port; -+ uint16_t l4_dest_port; -+}; -+ -+/** -+ * struct dpsw_acl_key - ACL key -+ * @match: Match fields -+ * @mask: Mask: b'1 - valid, b'0 don't care -+ */ -+struct dpsw_acl_key { -+ struct dpsw_acl_fields match; -+ struct dpsw_acl_fields mask; -+}; -+ -+/** -+ * enum dpsw_acl_action -+ * @DPSW_ACL_ACTION_DROP: Drop frame -+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port -+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame -+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface -+ */ -+enum dpsw_acl_action { -+ DPSW_ACL_ACTION_DROP, -+ DPSW_ACL_ACTION_REDIRECT, -+ DPSW_ACL_ACTION_ACCEPT, -+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF -+}; -+ -+/** -+ * struct dpsw_acl_result - ACL action -+ * @action: Action should be taken when ACL entry hit -+ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for -+ * action -+ */ -+struct dpsw_acl_result { -+ enum dpsw_acl_action action; -+ uint16_t if_id; -+}; -+ -+/** -+ * struct dpsw_acl_entry_cfg - ACL entry -+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call -+ * to dpsw_acl_prepare_entry_cfg() -+ * @result: Required action when entry hit occurs -+ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change -+ * during the lifetime of a Policy. It is user responsibility to -+ * space the priorities according to consequent rule additions. -+ */ -+struct dpsw_acl_entry_cfg { -+ uint64_t key_iova; -+ struct dpsw_acl_result result; -+ int precedence; -+}; -+ -+/** -+ * dpsw_acl_add() - Adds ACL to L2 switch. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: Returned ACL ID, for the future reference -+ * @cfg: ACL configuration -+ * -+ * Create Access Control List. Multiple ACLs can be created and -+ * co-exist in L2 switch -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpsw_acl_add(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *acl_id, -+ const struct dpsw_acl_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove() - Removes ACL from L2 switch. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_remove(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id); -+ -+/** -+ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL. -+ * @key: key -+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before adding or removing acl_entry -+ * -+ */ -+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, -+ uint8_t *entry_cfg_buf); -+ -+/** -+ * dpsw_acl_add_entry() - Adds an entry to ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: entry configuration -+ * -+ * warning: This function has to be called after dpsw_acl_set_entry_cfg() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove_entry() - Removes an entry from ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: entry configuration -+ * -+ * warning: This function has to be called after dpsw_acl_set_entry_cfg() -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_entry_cfg *cfg); -+ -+/** -+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL -+ * @num_ifs: Number of interfaces -+ * @if_id: List of interfaces -+ */ -+struct dpsw_acl_if_cfg { -+ uint16_t num_ifs; -+ uint16_t if_id[DPSW_MAX_IF]; -+}; -+ -+/** -+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: interfaces list -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg); -+ -+/** -+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPSW object -+ * @acl_id: ACL ID -+ * @cfg: interfaces list -+ * -+ * Return: '0' on Success; Error code otherwise. 
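A sketch of the ACL flow described above: create an ACL, serialize a key that matches one source MAC into a zeroed 256-byte buffer (the caller supplies the buffer and its DMA-mapped I/O virtual address, as with early drop), add a drop entry and bind the ACL to an interface. MAC, mask, sizes and interface IDs are examples only.

#include "fsl_dpsw.h"

static int block_source_mac(struct fsl_mc_io *mc_io, uint16_t token,
			    uint8_t *key_buf, uint64_t key_iova,
			    uint16_t *acl_id)
{
	struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
	struct dpsw_acl_key key = {
		.match = { .l2_source_mac = { 0x02, 0xde, 0xad, 0xbe, 0xef, 0x01 } },
		.mask  = { .l2_source_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
	};
	struct dpsw_acl_entry_cfg entry = {
		.key_iova = key_iova,
		.result = { .action = DPSW_ACL_ACTION_DROP },
		.precedence = 0,
	};
	struct dpsw_acl_if_cfg ifs = { .num_ifs = 1, .if_id = { 1 } };
	int err;

	err = dpsw_acl_add(mc_io, 0, token, acl_id, &acl_cfg);
	if (err)
		return err;
	dpsw_acl_prepare_entry_cfg(&key, key_buf);   /* fill key_buf for DMA */
	err = dpsw_acl_add_entry(mc_io, 0, token, *acl_id, &entry);
	if (!err)
		err = dpsw_acl_add_if(mc_io, 0, token, *acl_id, &ifs);
	return err;
}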
-+ */ -+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ const struct dpsw_acl_if_cfg *cfg); -+ -+/** -+ * struct dpsw_acl_attr - ACL Attributes -+ * @max_entries: Max number of ACL entries -+ * @num_entries: Number of used ACL entries -+ * @num_ifs: Number of interfaces associated with ACL -+ */ -+struct dpsw_acl_attr { -+ uint16_t max_entries; -+ uint16_t num_entries; -+ uint16_t num_ifs; -+}; -+ -+/** -+* dpsw_acl_get_attributes() - Get specific counter of particular interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @acl_id: ACL Identifier -+* @attr: Returned ACL attributes -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t acl_id, -+ struct dpsw_acl_attr *attr); -+/** -+* struct dpsw_ctrl_if_attr - Control interface attributes -+* @rx_fqid: Receive FQID -+* @rx_err_fqid: Receive error FQID -+* @tx_err_conf_fqid: Transmit error and confirmation FQID -+*/ -+struct dpsw_ctrl_if_attr { -+ uint32_t rx_fqid; -+ uint32_t rx_err_fqid; -+ uint32_t tx_err_conf_fqid; -+}; -+ -+/** -+* dpsw_ctrl_if_get_attributes() - Obtain control interface attributes -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @attr: Returned control interface attributes -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpsw_ctrl_if_attr *attr); -+ -+/** -+ * Maximum number of DPBP -+ */ -+#define DPSW_MAX_DPBP 8 -+ -+/** -+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpsw_ctrl_if_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPSW_MAX_DPBP]; -+}; -+ -+/** -+* dpsw_ctrl_if_set_pools() - Set control interface buffer pools -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* @cfg: buffer pools configuration -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpsw_ctrl_if_pools_cfg *cfg); -+ -+/** -+* dpsw_ctrl_if_enable() - Enable control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. -+*/ -+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+* dpsw_ctrl_if_disable() - Function disables control interface -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPSW object -+* -+* Return: '0' on Success; Error code otherwise. 
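A final sketch for the control interface calls above: attach one buffer pool, enable the interface and read back its frame queue IDs. The DPBP object ID and buffer size are placeholders, and the DPSW handle is assumed to be open.

#include "fsl_dpsw.h"

static int setup_ctrl_if(struct fsl_mc_io *mc_io, uint16_t token, int dpbp_id)
{
	struct dpsw_ctrl_if_pools_cfg pools = {
		.num_dpbp = 1,
		.pools = { { .dpbp_id = dpbp_id,
			     .buffer_size = 2048,
			     .backup_pool = 0 } },
	};
	struct dpsw_ctrl_if_attr attr = { 0 };
	int err;

	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools);
	if (!err)
		err = dpsw_ctrl_if_enable(mc_io, 0, token);
	if (!err)
		err = dpsw_ctrl_if_get_attributes(mc_io, 0, token, &attr);
	return err;   /* on success attr.rx_fqid etc. hold the queue IDs */
}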
-+*/ -+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+#endif /* __FSL_DPSW_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h -new file mode 100644 -index 0000000..c65fe38 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h -@@ -0,0 +1,916 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPSW_CMD_H -+#define __FSL_DPSW_CMD_H -+ -+/* DPSW Version */ -+#define DPSW_VER_MAJOR 7 -+#define DPSW_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPSW_CMDID_CLOSE 0x800 -+#define DPSW_CMDID_OPEN 0x802 -+#define DPSW_CMDID_CREATE 0x902 -+#define DPSW_CMDID_DESTROY 0x900 -+ -+#define DPSW_CMDID_ENABLE 0x002 -+#define DPSW_CMDID_DISABLE 0x003 -+#define DPSW_CMDID_GET_ATTR 0x004 -+#define DPSW_CMDID_RESET 0x005 -+#define DPSW_CMDID_IS_ENABLED 0x006 -+ -+#define DPSW_CMDID_SET_IRQ 0x010 -+#define DPSW_CMDID_GET_IRQ 0x011 -+#define DPSW_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPSW_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPSW_CMDID_SET_IRQ_MASK 0x014 -+#define DPSW_CMDID_GET_IRQ_MASK 0x015 -+#define DPSW_CMDID_GET_IRQ_STATUS 0x016 -+#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPSW_CMDID_SET_REFLECTION_IF 0x022 -+ -+#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024 -+ -+#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026 -+ -+#define DPSW_CMDID_IF_SET_TCI 0x030 -+#define DPSW_CMDID_IF_SET_STP 0x031 -+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032 -+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033 -+#define DPSW_CMDID_IF_GET_COUNTER 0x034 -+#define DPSW_CMDID_IF_SET_COUNTER 0x035 -+#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036 -+#define DPSW_CMDID_IF_ADD_REFLECTION 0x037 -+#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038 -+#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039 -+#define DPSW_CMDID_IF_SET_METERING 0x03A -+#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B -+ -+#define DPSW_CMDID_IF_ENABLE 0x03D -+#define DPSW_CMDID_IF_DISABLE 0x03E -+ -+#define DPSW_CMDID_IF_GET_ATTR 0x042 -+ -+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044 -+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045 -+#define DPSW_CMDID_IF_GET_LINK_STATE 0x046 -+#define DPSW_CMDID_IF_SET_FLOODING 0x047 -+#define DPSW_CMDID_IF_SET_BROADCAST 0x048 -+#define DPSW_CMDID_IF_SET_MULTICAST 0x049 -+#define DPSW_CMDID_IF_GET_TCI 0x04A -+ -+#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C -+ -+#define DPSW_CMDID_VLAN_ADD 0x060 -+#define DPSW_CMDID_VLAN_ADD_IF 0x061 -+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062 -+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063 -+#define DPSW_CMDID_VLAN_REMOVE_IF 0x064 -+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065 -+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066 -+#define DPSW_CMDID_VLAN_REMOVE 0x067 -+#define DPSW_CMDID_VLAN_GET_IF 0x068 -+#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069 -+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A -+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B -+ -+#define DPSW_CMDID_FDB_GET_MULTICAST 0x080 -+#define DPSW_CMDID_FDB_GET_UNICAST 0x081 -+#define DPSW_CMDID_FDB_ADD 0x082 -+#define DPSW_CMDID_FDB_REMOVE 0x083 -+#define DPSW_CMDID_FDB_ADD_UNICAST 0x084 -+#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085 -+#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086 -+#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087 -+#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088 -+#define DPSW_CMDID_FDB_GET_ATTR 0x089 -+ -+#define DPSW_CMDID_ACL_ADD 0x090 -+#define DPSW_CMDID_ACL_REMOVE 0x091 -+#define DPSW_CMDID_ACL_ADD_ENTRY 0x092 -+#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093 -+#define DPSW_CMDID_ACL_ADD_IF 0x094 -+#define DPSW_CMDID_ACL_REMOVE_IF 0x095 -+#define DPSW_CMDID_ACL_GET_ATTR 0x096 -+ -+#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0 -+#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1 -+#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2 -+#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_OPEN(cmd, dpsw_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id) -+ -+/* cmd, param, 
offset, width, type, arg_name */ -+#define DPSW_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \ -+ cfg->adv.component_type);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\ -+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\ -+ 
MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\ -+ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\ -+ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \ -+ attr->component_type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\ -+ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\ -+ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, 
counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \ -+ cfg->priority_selector);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\ -+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\ -+ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[0].mode);\ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[1].mode);\ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[2].mode);\ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[3].mode);\ -+ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[4].mode);\ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[5].mode);\ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[6].mode);\ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \ -+ cfg->tc_sched[7].mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\ -+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, 
if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\ -+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpsw_early_drop_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_ENABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_DISABLE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \ -+ attr->admit_untagged);\ -+ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\ -+ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, 
if_id, frame_length) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \ -+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, 
uint16_t, cfg->num_ifs) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs) -+ -+/* param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) 
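/*
 * Illustrative sketch (not taken from this patch): how the MC_CMD_OP()-style
 * macros above reduce to a shift-and-mask write into one 64-bit command word.
 * ex_enc() re-derives the mc_enc()/MAKE_UMASK64 shape defined later in this
 * patch (fsl_mc_cmd.h); the field layout below mirrors DPSW_CMD_IF_SET_TCI
 * (if_id @ bit 0/16b, vlan_id @ 16/12b, dei @ 28/1b, pcp @ 29/3b).  All ex_
 * names are local to this example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_NUM_PARAMS 7

struct ex_mc_command {
	uint64_t header;
	uint64_t params[EX_NUM_PARAMS];
};

/* Mask the value to 'width' bits, then shift it to 'lsoffset'. */
static uint64_t ex_enc(int lsoffset, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? (((uint64_t)1 << width) - 1) : (uint64_t)-1;

	return (val & mask) << lsoffset;
}

int main(void)
{
	struct ex_mc_command cmd = { 0 };
	uint16_t if_id = 3, vlan_id = 100;
	uint8_t dei = 0, pcp = 5;

	/* Pack the TCI fields exactly as the macro's (param, offset, width)
	 * triples specify; each |= lands one field in params[0]. */
	cmd.params[0] |= ex_enc(0, 16, if_id);
	cmd.params[0] |= ex_enc(16, 12, vlan_id);
	cmd.params[0] |= ex_enc(28, 1, dei);
	cmd.params[0] |= ex_enc(29, 3, pcp);

	printf("params[0] = 0x%016llx\n", (unsigned long long)cmd.params[0]);
	return 0;
}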
-+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\ -+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\ -+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\ -+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\ -+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\ -+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\ -+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\ -+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \ -+ attr->learning_mode);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_ADD(cmd, acl_id) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_PREP_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ 
MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_EXT_ACL_ENTRY(ext, key) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\ -+ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\ -+ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\ -+ MC_EXT_OP(ext, 3, 48, 
8, uint8_t, key->match.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\ -+ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\ -+ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\ -+ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\ -+ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\ -+ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\ -+ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\ -+ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\ -+ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\ -+ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\ -+ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\ -+ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\ -+ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\ -+ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\ -+ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\ -+ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\ -+ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\ -+ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\ -+ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\ -+ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\ -+ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\ -+ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\ -+ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\ -+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\ 
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+#endif /* __FSL_DPSW_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_mc_cmd.h b/drivers/net/dpaa2/mc/fsl_mc_cmd.h -new file mode 100644 -index 0000000..ca4fb64 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_mc_cmd.h -@@ -0,0 +1,221 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_MC_CMD_H -+#define __FSL_MC_CMD_H -+ -+#define MC_CMD_NUM_OF_PARAMS 7 -+ -+#define MAKE_UMASK64(_width) \ -+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 :\ -+ (uint64_t)-1)) -+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) -+{ -+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); -+} -+ -+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) -+{ -+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); -+} -+ -+struct mc_command { -+ uint64_t header; -+ uint64_t params[MC_CMD_NUM_OF_PARAMS]; -+}; -+ -+/** -+ * enum mc_cmd_status - indicates MC status at command response -+ * @MC_CMD_STATUS_OK: Completed successfully -+ * @MC_CMD_STATUS_READY: Ready to be processed -+ * @MC_CMD_STATUS_AUTH_ERR: Authentication error -+ * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege -+ * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error -+ * @MC_CMD_STATUS_CONFIG_ERR: Configuration error -+ * @MC_CMD_STATUS_TIMEOUT: Operation timed out -+ * @MC_CMD_STATUS_NO_RESOURCE: No resources -+ * @MC_CMD_STATUS_NO_MEMORY: No memory available -+ * @MC_CMD_STATUS_BUSY: Device is busy -+ * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation -+ * @MC_CMD_STATUS_INVALID_STATE: Invalid state -+ */ -+enum mc_cmd_status { -+ MC_CMD_STATUS_OK = 0x0, -+ MC_CMD_STATUS_READY = 0x1, -+ MC_CMD_STATUS_AUTH_ERR = 0x3, -+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, -+ MC_CMD_STATUS_DMA_ERR = 0x5, -+ MC_CMD_STATUS_CONFIG_ERR = 0x6, -+ MC_CMD_STATUS_TIMEOUT = 0x7, -+ MC_CMD_STATUS_NO_RESOURCE = 0x8, -+ MC_CMD_STATUS_NO_MEMORY = 0x9, -+ MC_CMD_STATUS_BUSY = 0xA, -+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, -+ MC_CMD_STATUS_INVALID_STATE = 0xC -+}; -+ -+/* MC command flags */ -+ -+/** -+ * High priority flag -+ */ -+#define MC_CMD_FLAG_PRI 0x00008000 -+/** -+ * Command completion flag -+ */ -+#define MC_CMD_FLAG_INTR_DIS 0x01000000 -+ -+/** -+ * Command ID field offset -+ */ -+#define MC_CMD_HDR_CMDID_O 52 -+/** -+ * Command ID field size -+ */ -+#define MC_CMD_HDR_CMDID_S 12 -+/** -+ * Token field offset -+ */ -+#define MC_CMD_HDR_TOKEN_O 38 -+/** -+ * Token field size -+ */ -+#define MC_CMD_HDR_TOKEN_S 10 -+/** -+ * Status field offset -+ */ -+#define MC_CMD_HDR_STATUS_O 16 -+/** -+ * Status field size -+ */ -+#define MC_CMD_HDR_STATUS_S 8 -+/** -+ * Flags field offset -+ */ -+#define MC_CMD_HDR_FLAGS_O 0 -+/** -+ * Flags field size -+ */ -+#define MC_CMD_HDR_FLAGS_S 32 -+/** -+ * Command flags mask -+ */ -+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 -+ -+#define MC_CMD_HDR_READ_STATUS(_hdr) \ -+ ((enum mc_cmd_status)mc_dec((_hdr), \ -+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) -+ -+#define MC_CMD_HDR_READ_TOKEN(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) -+ -+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) -+ -+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ (_arg = 
(_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) -+ -+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) -+ -+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) -+ -+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ uint64_t hdr; -+ -+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id); -+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S, -+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK)); -+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token); -+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S, -+ MC_CMD_STATUS_READY); -+ -+ return hdr; -+} -+ -+/** -+ * mc_write_command - writes a command to a Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @cmd: pointer to a filled command -+ */ -+static inline void mc_write_command(struct mc_command __iomem *portal, -+ struct mc_command *cmd) -+{ -+ int i; -+ uint32_t word; -+ -+ /* copy command parameters into the portal */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ iowrite64(cmd->params[i], &portal->params[i]); -+ -+ /* submit the command by writing the header */ -+ word = (uint32_t)mc_dec(cmd->header, 32, 32); -+ iowrite32(word, (((uint32_t *)&portal->header) + 1)); -+ -+ word = (uint32_t)mc_dec(cmd->header, 0, 32); -+ iowrite32(word, (uint32_t *)&portal->header); -+} -+ -+/** -+ * mc_read_response - reads the response for the last MC command from a -+ * Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @resp: pointer to command response buffer -+ * -+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. -+ */ -+static inline enum mc_cmd_status mc_read_response( -+ struct mc_command __iomem *portal, -+ struct mc_command *resp) -+{ -+ int i; -+ enum mc_cmd_status status; -+ -+ /* Copy command response header from MC portal: */ -+ resp->header = ioread64(&portal->header); -+ status = MC_CMD_HDR_READ_STATUS(resp->header); -+ if (status != MC_CMD_STATUS_OK) -+ return status; -+ -+ /* Copy command response data from MC portal: */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ resp->params[i] = ioread64(&portal->params[i]); -+ -+ return status; -+} -+ -+#endif /* __FSL_MC_CMD_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_mc_sys.h b/drivers/net/dpaa2/mc/fsl_mc_sys.h -new file mode 100644 -index 0000000..b9f4244 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_mc_sys.h -@@ -0,0 +1,98 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
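/*
 * Hedged sketch of the command-header layout used by mc_encode_cmd_header()
 * in fsl_mc_cmd.h above (cmd_id @ bit 52/12b, token @ 38/10b, status @ 16/8b,
 * flags @ 0/32b, flags masked with 0xFF00FF00).  The helper re-derives the
 * packing with plain shifts so the layout can be checked in isolation; the
 * ex_ names and the choice of DPSW_CMDID_OPEN as sample input are local to
 * this example.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t ex_enc(int lsoffset, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? (((uint64_t)1 << width) - 1) : (uint64_t)-1;

	return (val & mask) << lsoffset;
}

static uint64_t ex_encode_cmd_header(uint16_t cmd_id, uint32_t cmd_flags,
				     uint16_t token)
{
	uint64_t hdr;

	hdr  = ex_enc(52, 12, cmd_id);                       /* MC_CMD_HDR_CMDID_O/S  */
	hdr |= ex_enc(0, 32, cmd_flags & 0xFF00FF00);        /* MC_CMD_HDR_FLAGS_MASK */
	hdr |= ex_enc(38, 10, token);                        /* MC_CMD_HDR_TOKEN_O/S  */
	hdr |= ex_enc(16, 8, 0x1 /* MC_CMD_STATUS_READY */); /* MC_CMD_HDR_STATUS_O/S */
	return hdr;
}

int main(void)
{
	/* Open command for a DPSW object: DPSW_CMDID_OPEN = 0x802, low
	 * priority (flags 0), token 0 because no object is open yet. */
	uint64_t hdr = ex_encode_cmd_header(0x802, 0, 0);

	/* The status field stays READY until the MC rewrites the header. */
	assert(((hdr >> 16) & 0xFF) == 0x1);
	return 0;
}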
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_MC_SYS_H -+#define _FSL_MC_SYS_H -+ -+#ifdef __linux_driver__ -+ -+#include -+#include -+#include -+ -+struct fsl_mc_io { -+ void *regs; -+}; -+ -+#ifndef ENOTSUP -+#define ENOTSUP 95 -+#endif -+ -+#define ioread64(_p) readq(_p) -+#define iowrite64(_v, _p) writeq(_v, _p) -+ -+#else /* __linux_driver__ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define cpu_to_le64(x) __cpu_to_le64(x) -+#ifndef dmb -+#define dmb() do {\ -+ __asm__ __volatile__ ("" : : : "memory");\ -+} while (0) -+ -+#endif -+#define __iormb() dmb() -+#define __iowmb() dmb() -+#define __arch_getq(a) (*(volatile unsigned long *)(a)) -+#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v)) -+#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v)) -+#define readq(c) ({ uint64_t __v = __arch_getq(c); __iormb(); __v; }) -+#define writeq(v, c) ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; }) -+#define writeq32(v, c) ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; }) -+#define ioread64(_p) readq(_p) -+#define iowrite64(_v, _p) writeq(_v, _p) -+#define iowrite32(_v, _p) writeq32(_v, _p) -+#define __iomem -+ -+struct fsl_mc_io { -+ void *regs; -+}; -+ -+#ifndef ENOTSUP -+#define ENOTSUP 95 -+#endif -+ -+/*GPP is supposed to use MC commands with low priority*/ -+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */ -+ -+struct mc_command; -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); -+ -+#endif /* __linux_driver__ */ -+ -+#endif /* _FSL_MC_SYS_H */ -diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h -new file mode 100644 -index 0000000..43825b8 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/fsl_net.h -@@ -0,0 +1,480 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_NET_H -+#define __FSL_NET_H -+ -+#define LAST_HDR_INDEX 0xFFFFFFFF -+ -+/*****************************************************************************/ -+/* Protocol fields */ -+/*****************************************************************************/ -+ -+/************************* Ethernet fields *********************************/ -+#define NH_FLD_ETH_DA (1) -+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) -+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) -+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) -+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) -+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) -+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) -+ -+#define NH_FLD_ETH_ADDR_SIZE 6 -+ -+/*************************** VLAN fields ***********************************/ -+#define NH_FLD_VLAN_VPRI (1) -+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) -+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) -+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) -+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) -+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) -+ -+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ -+ NH_FLD_VLAN_CFI | \ -+ NH_FLD_VLAN_VID) -+ -+/************************ IP (generic) fields ******************************/ -+#define NH_FLD_IP_VER (1) -+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) -+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) -+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) -+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) -+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) -+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) -+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) -+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) -+ -+#define NH_FLD_IP_PROTO_SIZE 1 -+ -+/***************************** IPV4 fields *********************************/ -+#define NH_FLD_IPV4_VER (1) -+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) -+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) -+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) -+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) -+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) -+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) -+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) -+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) -+#define 
NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) -+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) -+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) -+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) -+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) -+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) -+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) -+ -+#define NH_FLD_IPV4_ADDR_SIZE 4 -+#define NH_FLD_IPV4_PROTO_SIZE 1 -+ -+/***************************** IPV6 fields *********************************/ -+#define NH_FLD_IPV6_VER (1) -+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) -+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) -+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) -+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) -+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) -+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) -+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) -+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) -+ -+#define NH_FLD_IPV6_ADDR_SIZE 16 -+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 -+ -+/***************************** ICMP fields *********************************/ -+#define NH_FLD_ICMP_TYPE (1) -+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) -+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) -+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) -+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) -+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) -+ -+#define NH_FLD_ICMP_CODE_SIZE 1 -+#define NH_FLD_ICMP_TYPE_SIZE 1 -+ -+/***************************** IGMP fields *********************************/ -+#define NH_FLD_IGMP_VERSION (1) -+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) -+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) -+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) -+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) -+ -+/***************************** TCP fields **********************************/ -+#define NH_FLD_TCP_PORT_SRC (1) -+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) -+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) -+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) -+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) -+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) -+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) -+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) -+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) -+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) -+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) -+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) -+ -+#define NH_FLD_TCP_PORT_SIZE 2 -+ -+/***************************** UDP fields **********************************/ -+#define NH_FLD_UDP_PORT_SRC (1) -+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) -+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) -+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) -+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_UDP_PORT_SIZE 2 -+ -+/*************************** UDP-lite fields *******************************/ -+#define NH_FLD_UDP_LITE_PORT_SRC (1) -+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) -+#define NH_FLD_UDP_LITE_ALL_FIELDS \ -+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_UDP_LITE_PORT_SIZE 2 -+ -+/*************************** UDP-encap-ESP fields **************************/ -+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) -+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) -+#define NH_FLD_UDP_ENC_ESP_LEN 
(NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) -+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) -+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) -+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) -+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ -+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) -+ -+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 -+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_PORT_SRC (1) -+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) -+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) -+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) -+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_SCTP_PORT_SIZE 2 -+ -+/***************************** DCCP fields *********************************/ -+#define NH_FLD_DCCP_PORT_SRC (1) -+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) -+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_DCCP_PORT_SIZE 2 -+ -+/***************************** IPHC fields *********************************/ -+#define NH_FLD_IPHC_CID (1) -+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) -+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) -+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) -+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) -+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) -+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) -+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) -+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) -+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) -+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) -+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) -+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) -+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ -+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) -+ -+/*************************** L2TPV2 fields *********************************/ -+#define NH_FLD_L2TPV2_TYPE_BIT (1) -+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) -+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) -+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) -+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) -+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) -+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) -+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) -+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) -+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) -+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) -+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) -+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) -+#define NH_FLD_L2TPV2_ALL_FIELDS \ -+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) -+ -+/*************************** L2TPV3 fields *********************************/ -+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) -+#define 
NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) -+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) -+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) -+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) -+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) -+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) -+ -+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) -+ -+/**************************** PPP fields ***********************************/ -+#define NH_FLD_PPP_PID (1) -+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) -+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) -+ -+/************************** PPPoE fields ***********************************/ -+#define NH_FLD_PPPOE_VER (1) -+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) -+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) -+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) -+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) -+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) -+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) -+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) -+ -+/************************* PPP-Mux fields **********************************/ -+#define NH_FLD_PPPMUX_PID (1) -+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) -+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) -+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) -+ -+/*********************** PPP-Mux sub-frame fields **************************/ -+#define NH_FLD_PPPMUX_SUBFRM_PFF (1) -+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) -+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) -+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) -+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) -+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ -+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) -+ -+/*************************** LLC fields ************************************/ -+#define NH_FLD_LLC_DSAP (1) -+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) -+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) -+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) -+ -+/*************************** NLPID fields **********************************/ -+#define NH_FLD_NLPID_NLPID (1) -+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) -+ -+/*************************** SNAP fields ***********************************/ -+#define NH_FLD_SNAP_OUI (1) -+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) -+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) -+ -+/*************************** LLC SNAP fields *******************************/ -+#define NH_FLD_LLC_SNAP_TYPE (1) -+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) -+ -+#define NH_FLD_ARP_HTYPE (1) -+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) -+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) -+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) -+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) -+#define NH_FLD_ARP_SHA 
(NH_FLD_ARP_HTYPE << 5) -+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) -+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) -+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) -+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) -+ -+/*************************** RFC2684 fields ********************************/ -+#define NH_FLD_RFC2684_LLC (1) -+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) -+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) -+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) -+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) -+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) -+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) -+ -+/*************************** User defined fields ***************************/ -+#define NH_FLD_USER_DEFINED_SRCPORT (1) -+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) -+#define NH_FLD_USER_DEFINED_ALL_FIELDS \ -+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) -+ -+/*************************** Payload fields ********************************/ -+#define NH_FLD_PAYLOAD_BUFFER (1) -+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) -+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) -+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) -+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) -+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) -+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) -+ -+/*************************** GRE fields ************************************/ -+#define NH_FLD_GRE_TYPE (1) -+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) -+ -+/*************************** MINENCAP fields *******************************/ -+#define NH_FLD_MINENCAP_SRC_IP (1) -+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) -+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) -+#define NH_FLD_MINENCAP_ALL_FIELDS \ -+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) -+ -+/*************************** IPSEC AH fields *******************************/ -+#define NH_FLD_IPSEC_AH_SPI (1) -+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) -+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) -+ -+/*************************** IPSEC ESP fields ******************************/ -+#define NH_FLD_IPSEC_ESP_SPI (1) -+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) -+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) -+ -+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 -+ -+/*************************** MPLS fields ***********************************/ -+#define NH_FLD_MPLS_LABEL_STACK (1) -+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ -+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) -+ -+/*************************** MACSEC fields *********************************/ -+#define NH_FLD_MACSEC_SECTAG (1) -+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) -+ -+/*************************** GTP fields ************************************/ -+#define NH_FLD_GTP_TEID (1) -+ -+/* Protocol options */ -+ -+/* Ethernet options */ -+#define NH_OPT_ETH_BROADCAST 1 -+#define NH_OPT_ETH_MULTICAST 2 -+#define NH_OPT_ETH_UNICAST 3 -+#define NH_OPT_ETH_BPDU 4 -+ -+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) -+/* also applicable for broadcast */ -+ -+/* VLAN options */ -+#define NH_OPT_VLAN_CFI 1 -+ -+/* IPV4 options */ -+#define NH_OPT_IPV4_UNICAST 1 -+#define NH_OPT_IPV4_MULTICAST 2 -+#define NH_OPT_IPV4_BROADCAST 3 -+#define NH_OPT_IPV4_OPTION 4 -+#define NH_OPT_IPV4_FRAG 5 -+#define 
NH_OPT_IPV4_INITIAL_FRAG 6 -+ -+/* IPV6 options */ -+#define NH_OPT_IPV6_UNICAST 1 -+#define NH_OPT_IPV6_MULTICAST 2 -+#define NH_OPT_IPV6_OPTION 3 -+#define NH_OPT_IPV6_FRAG 4 -+#define NH_OPT_IPV6_INITIAL_FRAG 5 -+ -+/* General IP options (may be used for any version) */ -+#define NH_OPT_IP_FRAG 1 -+#define NH_OPT_IP_INITIAL_FRAG 2 -+#define NH_OPT_IP_OPTION 3 -+ -+/* Minenc. options */ -+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 -+ -+/* GRE. options */ -+#define NH_OPT_GRE_ROUTING_PRESENT 1 -+ -+/* TCP options */ -+#define NH_OPT_TCP_OPTIONS 1 -+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 -+#define NH_OPT_TCP_CONTROL_LOW_BITS 3 -+ -+/* CAPWAP options */ -+#define NH_OPT_CAPWAP_DTLS 1 -+ -+enum net_prot { -+ NET_PROT_NONE = 0, -+ NET_PROT_PAYLOAD, -+ NET_PROT_ETH, -+ NET_PROT_VLAN, -+ NET_PROT_IPV4, -+ NET_PROT_IPV6, -+ NET_PROT_IP, -+ NET_PROT_TCP, -+ NET_PROT_UDP, -+ NET_PROT_UDP_LITE, -+ NET_PROT_IPHC, -+ NET_PROT_SCTP, -+ NET_PROT_SCTP_CHUNK_DATA, -+ NET_PROT_PPPOE, -+ NET_PROT_PPP, -+ NET_PROT_PPPMUX, -+ NET_PROT_PPPMUX_SUBFRM, -+ NET_PROT_L2TPV2, -+ NET_PROT_L2TPV3_CTRL, -+ NET_PROT_L2TPV3_SESS, -+ NET_PROT_LLC, -+ NET_PROT_LLC_SNAP, -+ NET_PROT_NLPID, -+ NET_PROT_SNAP, -+ NET_PROT_MPLS, -+ NET_PROT_IPSEC_AH, -+ NET_PROT_IPSEC_ESP, -+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ -+ NET_PROT_MACSEC, -+ NET_PROT_GRE, -+ NET_PROT_MINENCAP, -+ NET_PROT_DCCP, -+ NET_PROT_ICMP, -+ NET_PROT_IGMP, -+ NET_PROT_ARP, -+ NET_PROT_CAPWAP_DATA, -+ NET_PROT_CAPWAP_CTRL, -+ NET_PROT_RFC2684, -+ NET_PROT_ICMPV6, -+ NET_PROT_FCOE, -+ NET_PROT_FIP, -+ NET_PROT_ISCSI, -+ NET_PROT_GTP, -+ NET_PROT_USER_DEFINED_L2, -+ NET_PROT_USER_DEFINED_L3, -+ NET_PROT_USER_DEFINED_L4, -+ NET_PROT_USER_DEFINED_L5, -+ NET_PROT_USER_DEFINED_SHIM1, -+ NET_PROT_USER_DEFINED_SHIM2, -+ -+ NET_PROT_DUMMY_LAST -+}; -+ -+/*! IEEE8021.Q */ -+#define NH_IEEE8021Q_ETYPE 0x8100 -+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ -+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ -+ (((uint32_t)(pcp & 0x07)) << 13) | \ -+ (((uint32_t)(dei & 0x01)) << 12) | \ -+ (((uint32_t)(vlan_id & 0xFFF)))) -+ -+#endif /* __FSL_NET_H */ -diff --git a/drivers/net/dpaa2/mc/mc_sys.c b/drivers/net/dpaa2/mc/mc_sys.c -new file mode 100644 -index 0000000..fcbed28 ---- /dev/null -+++ b/drivers/net/dpaa2/mc/mc_sys.c -@@ -0,0 +1,127 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+ -+/* ODP framework using MC portal in shared mode. The following -+ changes to introduce locks must be maintained while -+ merging the FLIB. -+*/ -+ -+/** -+* The mc_spinlock_t type. -+*/ -+typedef struct { -+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ -+} mc_spinlock_t; -+ -+/** -+* A static spinlock initializer. -+*/ -+static mc_spinlock_t mc_portal_lock = { 0 }; -+ -+static inline void mc_pause(void) {} -+ -+static inline void mc_spinlock_lock(mc_spinlock_t *sl) -+{ -+ while (__sync_lock_test_and_set(&sl->locked, 1)) -+ while (sl->locked) -+ mc_pause(); -+} -+ -+static inline void mc_spinlock_unlock(mc_spinlock_t *sl) -+{ -+ __sync_lock_release(&sl->locked); -+} -+ -+static int mc_status_to_error(enum mc_cmd_status status) -+{ -+ switch (status) { -+ case MC_CMD_STATUS_OK: -+ return 0; -+ case MC_CMD_STATUS_AUTH_ERR: -+ return -EACCES; /* Token error */ -+ case MC_CMD_STATUS_NO_PRIVILEGE: -+ return -EPERM; /* Permission denied */ -+ case MC_CMD_STATUS_DMA_ERR: -+ return -EIO; /* Input/Output error */ -+ case MC_CMD_STATUS_CONFIG_ERR: -+ return -EINVAL; /* Device not configured */ -+ case MC_CMD_STATUS_TIMEOUT: -+ return -ETIMEDOUT; /* Operation timed out */ -+ case MC_CMD_STATUS_NO_RESOURCE: -+ return -ENAVAIL; /* Resource temporarily unavailable */ -+ case MC_CMD_STATUS_NO_MEMORY: -+ return -ENOMEM; /* Cannot allocate memory */ -+ case MC_CMD_STATUS_BUSY: -+ return -EBUSY; /* Device busy */ -+ case MC_CMD_STATUS_UNSUPPORTED_OP: -+ return -ENOTSUP; /* Operation not supported by device */ -+ case MC_CMD_STATUS_INVALID_STATE: -+ return -ENODEV; /* Invalid device state */ -+ default: -+ break; -+ } -+ -+ /* Not expected to reach here */ -+ return -EINVAL; -+} -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) -+{ -+ enum mc_cmd_status status; -+ -+ if (!mc_io || !mc_io->regs) -+ return -EACCES; -+ -+ /* --- Call lock function here in case portal is shared --- */ -+ mc_spinlock_lock(&mc_portal_lock); -+ -+ mc_write_command(mc_io->regs, cmd); -+ -+ /* Spin until status changes */ -+ do { -+ status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs)); -+ -+ /* --- Call wait function here to prevent blocking --- -+ * Change the loop condition accordingly to exit on timeout. 
-+ */ -+ } while (status == MC_CMD_STATUS_READY); -+ -+ /* Read the response back into the command buffer */ -+ mc_read_response(mc_io->regs, cmd); -+ -+ /* --- Call unlock function here in case portal is shared --- */ -+ mc_spinlock_unlock(&mc_portal_lock); -+ -+ return mc_status_to_error(status); -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.c b/drivers/net/dpaa2/qbman/driver/qbman_debug.c -new file mode 100644 -index 0000000..ef6c257 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.c -@@ -0,0 +1,929 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_portal.h" -+#include "qbman_debug.h" -+#include -+ -+/* QBMan portal management command code */ -+#define QBMAN_BP_QUERY 0x32 -+#define QBMAN_FQ_QUERY 0x44 -+#define QBMAN_FQ_QUERY_NP 0x45 -+#define QBMAN_WQ_QUERY 0x47 -+#define QBMAN_CGR_QUERY 0x51 -+#define QBMAN_WRED_QUERY 0x54 -+#define QBMAN_CGR_STAT_QUERY 0x55 -+#define QBMAN_CGR_STAT_QUERY_CLR 0x56 -+ -+enum qbman_attr_usage_e { -+ qbman_attr_usage_fq, -+ qbman_attr_usage_bpool, -+ qbman_attr_usage_cgr, -+ qbman_attr_usage_wqchan -+}; -+ -+struct int_qbman_attr { -+ uint32_t words[32]; -+ enum qbman_attr_usage_e usage; -+}; -+ -+#define attr_type_set(a, e) \ -+{ \ -+ struct qbman_attr *__attr = a; \ -+ enum qbman_attr_usage_e __usage = e; \ -+ ((struct int_qbman_attr *)__attr)->usage = __usage; \ -+} -+ -+#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) -+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) -+ -+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); -+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); -+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); -+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); -+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); -+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); -+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); -+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); -+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); -+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); -+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); -+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); -+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 8, 8); -+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 16, 8); -+ -+static void qbman_bp_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_bpool); -+} -+ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_bp_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_bp_bpid, p, bpid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_BP_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); -+ *va = !!qb_attr_code_decode(&code_bp_va, p); -+ *wae = !!qb_attr_code_decode(&code_bp_wae, p); -+} -+ -+static uint32_t qbman_bp_thresh_to_value(uint32_t val) -+{ -+ return (val & 0xff) << ((val & 0xf00) >> 8); -+} -+ -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, -+ p)); -+} -+ -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, -+ p)); -+} -+ -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, -+ p)); -+} -+ -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); -+} -+ -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *icid = qb_attr_code_decode(&code_bp_icid, p); -+ *pl = !!qb_attr_code_decode(&code_bp_pl, p); -+} -+ -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); -+} -+ -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); -+} -+ -+int qbman_bp_info_is_depleted(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); -+} -+ -+int qbman_bp_info_is_surplus(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); -+} -+ -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_fill, p); -+} -+ -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdptr, p); -+} -+ -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = 
ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sdcnt, p); -+} -+ -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdcnt, p); -+} -+ -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sscnt, p); -+} -+ -+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); -+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); -+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); -+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); -+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); -+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); -+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); -+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); -+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); -+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); -+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); -+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); -+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); -+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); -+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); -+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); -+ -+static void qbman_fq_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_fq); -+} -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *d = ATTR32(desc); -+ -+ qbman_fq_attr_clear(desc); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the WE-mask. -+ * For the query, word[0] of the result contains only the verb/rslt -+ * fields. Skip word[0] in the latter case. 
*/ -+ word_copy(&d[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); -+} -+ -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); -+} -+ -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *destwq = qb_attr_code_decode(&code_fq_destwq, p); -+} -+ -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icscred = qb_attr_code_decode(&code_fq_icscred, p); -+} -+ -+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); -+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); -+static uint32_t qbman_thresh_to_value(uint32_t val) -+{ -+ uint32_t m, e; -+ -+ m = qb_attr_code_decode(&code_tdthresh_mant, &val); -+ e = qb_attr_code_decode(&code_tdthresh_exp, &val); -+ return m << e; -+} -+ -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, -+ p)); -+} -+ -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); -+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); -+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, -+ qb_attr_code_decode(&code_fq_oa_len, p)); -+} -+ -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); -+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); -+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); -+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); -+} -+ -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); -+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); -+} -+ -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icid = qb_attr_code_decode(&code_fq_icid, p); -+ *pl = !!qb_attr_code_decode(&code_fq_pl, p); -+} -+ -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); -+} -+ -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); -+} -+ -+/* Query FQ Non-Programmable Fields */ -+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); -+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); -+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); -+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); -+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); -+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); -+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *d = ATTR32(state); -+ -+ qbman_fq_attr_clear(state); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, 
p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY_NP); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ word_copy(&d[0], &p[0], 16); -+ return 0; -+} -+ -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_state, p); -+} -+ -+int qbman_fq_state_force_eligible(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_fe, p); -+} -+ -+int qbman_fq_state_xoff(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_x, p); -+} -+ -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_r, p); -+} -+ -+int qbman_fq_state_overflow_error(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_oe, p); -+} -+ -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); -+} -+ -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); -+} -+ -+/* Query CGR */ -+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); -+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); -+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); -+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); -+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); -+static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); -+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); -+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); -+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); -+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); -+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); -+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); -+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); -+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code 
code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); -+ -+static void qbman_cgr_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_cgr); -+} -+ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d[2]; -+ int i; -+ uint32_t query_verb; -+ -+ d[0] = ATTR32(attr); -+ d[1] = ATTR32_1(attr); -+ -+ qbman_cgr_attr_clear(attr); -+ -+ for (i = 0; i < 2; i++) { -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the -+ * verb/cgid. For the query, word[0] of the result contains -+ * only the verb/rslt fields. Skip word[0] in the latter case. -+ */ -+ word_copy(&d[i][1], &p[1], 15); -+ } -+ return 0; -+} -+ -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd) -+ { -+ uint32_t *p = ATTR32(d); -+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, -+ p); -+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); -+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); -+} -+ -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi) -+{ -+ uint32_t *p = ATTR32(d); -+ *mode = qb_attr_code_decode(&code_cgr_mode, p); -+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); -+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); -+} -+ -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, -+ p); -+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); -+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); -+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); -+ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p); -+} -+ -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd) -+{ -+ uint32_t *p = ATTR32(d); -+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); -+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); -+} -+ -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); -+} -+ -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres_x, p)); -+} -+ -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_thres = 
qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_td_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); -+} -+ -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); -+} -+ -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); -+} -+ -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); -+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); -+} -+ -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr) -+{ -+ uint32_t *p = ATTR32(d); -+ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, -+ p); -+} -+ -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); -+} -+ -+#define WRED_EDP_WORD(n) (18 + n / 4) -+#define WRED_EDP_OFFSET(n) (8 * (n % 4)) -+#define WRED_PARM_DP_WORD(n) (n + 20) -+#define WRED_WE_EDP(n) (16 + n * 2) -+#define WRED_WE_PARM_DP(n) (17 + n * 2) -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), -+ WRED_EDP_OFFSET(idx), 8); -+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); -+} -+ -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp) -+{ -+ uint8_t ma, mn, step_i, step_s, pn; -+ -+ ma = (uint8_t)(dp >> 24); -+ mn = (uint8_t)(dp >> 19) & 0x1f; -+ step_i = (uint8_t)(dp >> 11); -+ step_s = (uint8_t)(dp >> 6) & 0x1f; -+ pn = (uint8_t)dp & 0x3f; -+ -+ *maxp = (uint8_t)(((pn << 2) * 100) / 256); -+ -+ if (mn == 0) -+ *maxth = ma; -+ else -+ *maxth = ((ma + 256) * (1 << (mn - 1))); -+ -+ if (step_s == 0) -+ *minth = *maxth - step_i; -+ else -+ *minth = *maxth - (256 + step_i) * (1 << (step_s - 1)); -+} -+ -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), -+ 0, 8); -+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); -+} -+ -+/* Query CGR/CCGR/CQ statistics */ -+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); -+static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); -+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, -+ int clear, uint32_t command_type, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t query_verb; -+ uint32_t hi, lo; -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ if (command_type < 2) -+ qb_attr_code_encode(&code_cgr_stat_ct, p, 
command_type); -+ query_verb = clear ? -+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query statistics of CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ -+ if (frame_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); -+ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ if (byte_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); -+ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ -+ return 0; -+} -+ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 1, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0, -+ frame_cnt, byte_cnt); -+} -+ -+/* WQ Chan Query */ -+static struct qb_attr_code code_wqchan_chanid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_wqchan_cdan_ctx_lo = QB_CODE(2, 0, 32); -+static struct qb_attr_code code_wqchan_cdan_ctx_hi = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_wqchan_cdan_wqid = QB_CODE(1, 16, 16); -+static struct qb_attr_code code_wqchan_ctrl = QB_CODE(1, 8, 8); -+ -+static void qbman_wqchan_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_wqchan); -+} -+ -+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_wqchan_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_wqchan_chanid, p, chanid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQ_QUERY); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_WQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of WQCHAN 0x%x failed, code=0x%02x\n", -+ chanid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len) -+{ -+ uint32_t *p = ATTR32(attr); -+ struct qb_attr_code code_wqchan_len = QB_CODE(wq + 8, 0, 24); -+ *len = qb_attr_code_decode(&code_wqchan_len, p); -+} -+ -+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx) -+{ -+ uint32_t lo, hi; -+ uint32_t *p = ATTR32(attr); -+ -+ lo = qb_attr_code_decode(&code_wqchan_cdan_ctx_lo, p); -+ hi = qb_attr_code_decode(&code_wqchan_cdan_ctx_hi, p); -+ *cdan_ctx = ((uint64_t)hi << 32) | (uint64_t)lo; -+} -+ -+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr, -+ uint16_t *cdan_wqid) -+{ -+ uint32_t *p = ATTR32(attr); -+ *cdan_wqid = (uint16_t)qb_attr_code_decode(&code_wqchan_cdan_wqid, p); -+} -+ -+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl) -+{ -+ uint32_t *p = ATTR32(attr); -+ *ctrl = (uint8_t)qb_attr_code_decode(&code_wqchan_ctrl, p); -+} -+ -+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid) -+{ -+ uint32_t *p = ATTR32(attr); -+ *chanid = (uint16_t)qb_attr_code_decode(&code_wqchan_chanid, p); -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.h b/drivers/net/dpaa2/qbman/driver/qbman_debug.h -new file mode 100644 -index 0000000..4d586a6 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.h -@@ -0,0 +1,140 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+struct qbman_attr { -+ uint32_t dont_manipulate_directly[40]; -+}; -+ -+/* Buffer pool query commands */ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a); -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a); -+int qbman_bp_info_is_depleted(struct qbman_attr *a); -+int qbman_bp_info_is_surplus(struct qbman_attr *a); -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *desc); -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len); -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps); -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); -+ -+/* FQ query command for non-programmable fields*/ -+enum qbman_fq_schedstate_e { -+ qbman_fq_schedstate_oos = 0, -+ qbman_fq_schedstate_retired, -+ qbman_fq_schedstate_tentatively_scheduled, -+ qbman_fq_schedstate_truly_scheduled, -+ qbman_fq_schedstate_parked, -+ qbman_fq_schedstate_held_active, -+}; -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state); -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); -+int qbman_fq_state_force_eligible(const struct qbman_attr *state); -+int qbman_fq_state_xoff(const struct qbman_attr *state); -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state); -+int qbman_fq_state_overflow_error(const struct qbman_attr *state); -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); -+ -+/* CGR query */ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, -+ struct qbman_attr *attr); -+void 
qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd); -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi); -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va); -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd); -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x); -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid); -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl); -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr); -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp); -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp); -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp); -+ -+/* CGR/CCGR/CQ statistics query */ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+ -+/* Query Work Queue Channel */ -+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, -+ struct qbman_attr *attr); -+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len); -+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx); -+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr, -+ uint16_t *cdan_wqid); -+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl); -+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid); -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.c b/drivers/net/dpaa2/qbman/driver/qbman_portal.c -new file mode 100644 -index 0000000..52e1f64 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.c -@@ -0,0 +1,1441 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_portal.h" -+ -+/* QBMan portal management command codes */ -+#define QBMAN_MC_ACQUIRE 0x30 -+#define QBMAN_WQCHAN_CONFIGURE 0x46 -+ -+/* CINH register offsets */ -+#define QBMAN_CINH_SWP_EQCR_PI 0x800 -+#define QBMAN_CINH_SWP_EQCR_CI 0x840 -+#define QBMAN_CINH_SWP_EQAR 0x8c0 -+#define QBMAN_CINH_SWP_DQPI 0xa00 -+#define QBMAN_CINH_SWP_DCAP 0xac0 -+#define QBMAN_CINH_SWP_SDQCR 0xb00 -+#define QBMAN_CINH_SWP_RAR 0xcc0 -+#define QBMAN_CINH_SWP_ISR 0xe00 -+#define QBMAN_CINH_SWP_IER 0xe40 -+#define QBMAN_CINH_SWP_ISDR 0xe80 -+#define QBMAN_CINH_SWP_IIR 0xec0 -+ -+/* CENA register offsets */ -+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_CR 0x600 -+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) -+#define QBMAN_CENA_SWP_VDQCR 0x780 -+#define QBMAN_CENA_SWP_EQCR_CI 0x840 -+ -+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ -+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) -+ -+/* QBMan FQ management command codes */ -+#define QBMAN_FQ_SCHEDULE 0x48 -+#define QBMAN_FQ_FORCE 0x49 -+#define QBMAN_FQ_XON 0x4d -+#define QBMAN_FQ_XOFF 0x4e -+ -+/*******************************/ -+/* Pre-defined attribute codes */ -+/*******************************/ -+ -+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); -+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); -+ -+/*************************/ -+/* SDQCR attribute codes */ -+/*************************/ -+ -+/* we put these here because at least some of them are required by -+ * qbman_swp_init() */ -+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); -+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); -+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_eq_dca_idx; -+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) -+enum qbman_sdqcr_dct { -+ qbman_sdqcr_dct_null = 0, -+ qbman_sdqcr_dct_prio_ics, -+ qbman_sdqcr_dct_active_ics, -+ qbman_sdqcr_dct_active -+}; -+ -+enum qbman_sdqcr_fc { -+ qbman_sdqcr_fc_one = 0, -+ qbman_sdqcr_fc_up_to_3 = 1 -+}; -+ -+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); -+ -+/*********************************/ -+/* Portal constructor/destructor */ -+/*********************************/ -+ -+/* Software portals should always be in the power-on state when we initialise, -+ * due to the CCSR-based portal reset functionality that MC has. -+ * -+ * Erk! 
Turns out that QMan versions prior to 4.1 do not correctly reset DQRR -+ * valid-bits, so we need to support a workaround where we don't trust -+ * valid-bits when detecting new entries until any stale ring entries have been -+ * overwritten at least once. The idea is that we read PI for the first few -+ * entries, then switch to valid-bit after that. The trick is to clear the -+ * bug-work-around boolean once the PI wraps around the ring for the first time. -+ * -+ * Note: this still carries a slight additional cost once the decrementer hits -+ * zero. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) -+{ -+ int ret; -+ uint32_t eqcr_pi; -+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); -+ -+ if (!p) -+ return NULL; -+ p->desc = d; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit = QB_VALID_BIT; -+ p->sdq = 0; -+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); -+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); -+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); -+ atomic_set(&p->vdq.busy, 1); -+ p->vdq.valid_bit = QB_VALID_BIT; -+ p->dqrr.next_idx = 0; -+ p->dqrr.valid_bit = QB_VALID_BIT; -+ qman_version = p->desc->qman_version; -+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) { -+ p->dqrr.dqrr_size = 4; -+ p->dqrr.reset_bug = 1; -+ /* Set size of DQRR to 4, encoded in 2 bits */ -+ code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2); -+ } else { -+ p->dqrr.dqrr_size = 8; -+ p->dqrr.reset_bug = 0; -+ /* Set size of DQRR to 8, encoded in 3 bits */ -+ code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3); -+ } -+ -+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); -+ if (ret) { -+ kfree(p); -+ pr_err("qbman_swp_sys_init() failed %d\n", ret); -+ return NULL; -+ } -+ /* SDQCR needs to be initialized to 0 when no channels are -+ being dequeued from or else the QMan HW will indicate an -+ error. 
The values that were calculated above will be -+ applied when dequeues from a specific channel are enabled */ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); -+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI); -+ p->eqcr.pi = eqcr_pi & 0xF; -+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT; -+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF; -+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE, -+ p->eqcr.ci, p->eqcr.pi); -+ -+ return p; -+} -+ -+void qbman_swp_finish(struct qbman_swp *p) -+{ -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ qbman_swp_sys_finish(&p->sys); -+ kfree(p); -+} -+ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) -+{ -+ return p->desc; -+} -+ -+/**************/ -+/* Interrupts */ -+/**************/ -+ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); -+} -+ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); -+} -+ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); -+} -+ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); -+} -+ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); -+} -+ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); -+} -+ -+/***********************/ -+/* Management commands */ -+/***********************/ -+ -+/* -+ * Internal code common to all types of management commands. -+ */ -+ -+void *qbman_swp_mc_start(struct qbman_swp *p) -+{ -+ void *ret; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); -+#ifdef QBMAN_CHECKING -+ if (!ret) -+ p->mc.check = swp_mc_can_submit; -+#endif -+ return ret; -+} -+ -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) -+{ -+ uint32_t *v = cmd; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_submit); -+#endif -+ /* TBD: "|=" is going to hurt performance. Need to move as many fields -+ * out of word zero, and for those that remain, the "OR" needs to occur -+ * at the caller side. This debug check helps to catch cases where the -+ * caller wants to OR but has forgotten to do so. 
*/ -+ BUG_ON((*v & cmd_verb) != *v); -+ *v = cmd_verb | p->mc.valid_bit; -+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_poll; -+#endif -+} -+ -+void *qbman_swp_mc_result(struct qbman_swp *p) -+{ -+ uint32_t *ret, verb; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_poll); -+#endif -+ qbman_cena_invalidate_prefetch(&p->sys, -+ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ /* Remove the valid-bit - command completed iff the rest is non-zero */ -+ verb = ret[0] & ~QB_VALID_BIT; -+ if (!verb) -+ return NULL; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit ^= QB_VALID_BIT; -+ return ret; -+} -+ -+/***********/ -+/* Enqueue */ -+/***********/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); -+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); -+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); -+/* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */ -+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1); -+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); -+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); -+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); -+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); -+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ -+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); -+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); -+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); -+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); -+ -+enum qbman_eq_cmd_e { -+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ -+ qbman_eq_cmd_empty, -+ /* DMA an enqueue response once complete */ -+ qbman_eq_cmd_respond, -+ /* DMA an enqueue response only if the enqueue fails */ -+ qbman_eq_cmd_respond_reject -+}; -+ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 0); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+} -+ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? 
qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); -+} -+ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); -+} -+ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); -+} -+ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); -+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); -+} -+ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); -+} -+ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 0); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); -+} -+ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 1); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); -+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); -+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); -+} -+ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); -+} -+ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); -+ if (enable) { -+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); -+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); -+ } -+} -+ -+#define EQAR_IDX(eqar) ((eqar) & 0x7) -+#define EQAR_VB(eqar) ((eqar) & 0x80) -+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) -+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); -+ -+ pr_debug("EQAR=%08x\n", eqar); -+ if (!EQAR_SUCCESS(eqar)) -+ return -EBUSY; -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ lwsync(); -+ p[0] = cl[0] | EQAR_VB(eqar); -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ return 0; -+} -+ -+static int qbman_swp_enqueue_ring_mode(struct qbman_swp 
*s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ s->eqcr.available += diff; -+ if (!diff) -+ return -EBUSY; -+ } -+ -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7)); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ lwsync(); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7)); -+ s->eqcr.pi++; -+ s->eqcr.pi &= 0xF; -+ s->eqcr.available--; -+ if (!(s->eqcr.pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ return 0; -+} -+ -+int qbman_swp_fill_ring(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ __attribute__((unused)) uint8_t burst_index) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ s->eqcr.available += diff; -+ if (!diff) -+ return -EBUSY; -+ -+ } -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7)); -+ /* word_copy(&p[1], &cl[1], 7); */ -+ memcpy(&p[1], &cl[1], 7*4); -+ /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */ -+ memcpy(&p[8], fd, sizeof(struct qbman_fd)); -+ -+ /* lwsync(); */ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ -+ s->eqcr.pi++; -+ s->eqcr.pi &= 0xF; -+ s->eqcr.available--; -+ if (!(s->eqcr.pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ -+ return 0; -+} -+ -+int qbman_swp_flush_ring(struct qbman_swp *s) -+{ -+ void *ptr = s->sys.addr_cena; -+ -+ dcbf((uint64_t)ptr); -+ dcbf((uint64_t)ptr + 0x40); -+ dcbf((uint64_t)ptr + 0x80); -+ dcbf((uint64_t)ptr + 0xc0); -+ dcbf((uint64_t)ptr + 0x100); -+ dcbf((uint64_t)ptr + 0x140); -+ dcbf((uint64_t)ptr + 0x180); -+ dcbf((uint64_t)ptr + 0x1c0); -+ -+ return 0; -+} -+ -+void qbman_sync(void) -+{ -+ lwsync(); -+} -+ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ if (s->sys.eqcr_mode == qman_eqcr_vb_array) -+ return qbman_swp_enqueue_array_mode(s, d, fd); -+ else /* Use ring mode by default */ -+ return qbman_swp_enqueue_ring_mode(s, d, fd); -+} -+ -+/*************************/ -+/* Static (push) dequeue */ -+/*************************/ -+ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) -+{ -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); -+} -+ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) -+{ -+ uint16_t dqsrc; -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ qb_attr_code_encode(&code, &s->sdq, !!enable); -+ /* Read make the complete src map. 
If no channels are enabled -+ the SDQCR must be 0 or else QMan will assert errors */ -+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); -+ if (dqsrc != 0) -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); -+ else -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); -+} -+ -+/***************************/ -+/* Volatile (pull) dequeue */ -+/***************************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); -+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); -+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); -+ -+enum qb_pull_dt_e { -+ qb_pull_dt_channel, -+ qb_pull_dt_workqueue, -+ qb_pull_dt_framequeue -+}; -+ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct qbman_result *storage, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ /* Squiggle the pointer 'storage' into the extra 2 words of the -+ * descriptor (which aren't copied to the hw command) */ -+ *(void **)&cl[4] = storage; -+ if (!storage) { -+ qb_attr_code_encode(&code_pull_rls, cl, 0); -+ return; -+ } -+ qb_attr_code_encode(&code_pull_rls, cl, 1); -+ qb_attr_code_encode(&code_pull_stash, cl, !!stash); -+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); -+} -+ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ BUG_ON(!numframes || (numframes > 16)); -+ qb_attr_code_encode(&code_pull_numframes, cl, -+ (uint32_t)(numframes - 1)); -+} -+ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_token, cl, token); -+} -+ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, 1); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); -+} -+ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); -+} -+ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); -+ qb_attr_code_encode(&code_pull_dqsource, cl, chid); -+} -+ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) -+{ -+ uint32_t *p; -+ uint32_t *cl = qb_cl(d); -+ -+ if (!atomic_dec_and_test(&s->vdq.busy)) { -+ atomic_inc(&s->vdq.busy); -+ return -EBUSY; -+ } -+ s->vdq.storage = *(void **)&cl[4]; -+ qb_attr_code_encode(&code_pull_token, cl, 1); -+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ word_copy(&p[1], &cl[1], 3); -+ /* Set the verb byte, have to substitute in the 
valid-bit */ -+ lwsync(); -+ p[0] = cl[0] | s->vdq.valid_bit; -+ s->vdq.valid_bit ^= QB_VALID_BIT; -+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ return 0; -+} -+ -+/****************/ -+/* Polling DQRR */ -+/****************/ -+ -+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); -+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7); -+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); -+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); -+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ -+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); -+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); -+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); -+ -+#define QBMAN_RESULT_DQ 0x60 -+#define QBMAN_RESULT_FQRN 0x21 -+#define QBMAN_RESULT_FQRNI 0x22 -+#define QBMAN_RESULT_FQPN 0x24 -+#define QBMAN_RESULT_FQDAN 0x25 -+#define QBMAN_RESULT_CDAN 0x26 -+#define QBMAN_RESULT_CSCN_MEM 0x27 -+#define QBMAN_RESULT_CGCU 0x28 -+#define QBMAN_RESULT_BPSCN 0x29 -+#define QBMAN_RESULT_CSCN_WQ 0x2a -+ -+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); -+ -+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. */ -+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s) -+{ -+ uint32_t verb; -+ uint32_t response_verb; -+ uint32_t flags; -+ const struct qbman_result *dq; -+ const uint32_t *p; -+ -+ /* Before using valid-bit to detect if something is there, we have to -+ * handle the case of the DQRR reset bug... */ -+ if (unlikely(s->dqrr.reset_bug)) { -+ /* We pick up new entries by cache-inhibited producer index, -+ * which means that a non-coherent mapping would require us to -+ * invalidate and read *only* once that PI has indicated that -+ * there's an entry here. The first trip around the DQRR ring -+ * will be much less efficient than all subsequent trips around -+ * it... -+ */ -+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); -+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); -+ /* there are new entries iff pi != next_idx */ -+ if (pi == s->dqrr.next_idx) -+ return NULL; -+ /* if next_idx is/was the last ring index, and 'pi' is -+ * different, we can disable the workaround as all the ring -+ * entries have now been DMA'd to so valid-bit checking is -+ * repaired. Note: this logic needs to be based on next_idx -+ * (which increments one at a time), rather than on pi (which -+ * can burst and wrap-around between our snapshots of it). -+ */ -+ BUG_ON((s->dqrr.dqrr_size - 1) < 0); -+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) { -+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", -+ s->dqrr.next_idx, pi); -+ s->dqrr.reset_bug = 0; -+ } -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ } -+ dq = qbman_cena_read_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ p = qb_cl(dq); -+ verb = qb_attr_code_decode(&code_dqrr_verb, p); -+ /* If the valid-bit isn't of the expected polarity, nothing there. 
Note, -+ * in the DQRR reset bug workaround, we shouldn't need to skip these -+ * check, because we've already determined that a new entry is available -+ * and we've invalidated the cacheline before reading it, so the -+ * valid-bit behaviour is repaired and should tell us what we already -+ * knew from reading PI. -+ */ -+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) -+ return NULL; -+ -+ /* There's something there. Move "next_idx" attention to the next ring -+ * entry (and prefetch it) before returning what we found. */ -+ s->dqrr.next_idx++; -+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) { -+ s->dqrr.next_idx = 0; -+ s->dqrr.valid_bit ^= QB_VALID_BIT; -+ } -+ /* If this is the final response to a volatile dequeue command -+ indicate that the vdq is no longer busy */ -+ flags = qbman_result_DQ_flags(dq); -+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); -+ if ((response_verb == QBMAN_RESULT_DQ) && -+ (flags & QBMAN_DQ_STAT_VOLATILE) && -+ (flags & QBMAN_DQ_STAT_EXPIRED)) -+ atomic_inc(&s->vdq.busy); -+ -+ return dq; -+} -+ -+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); -+} -+ -+/*********************************/ -+/* Polling user-provided storage */ -+/*********************************/ -+ -+int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. */ -+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); -+ -+ /* Only now do we convert from hardware to host endianness. Also, as we -+ * are returning success, the user has promised not to call us again, so -+ * there's no risk of us converting the endianness twice... */ -+ make_le32_n(p, 16); -+ return 1; -+} -+ -+int qbman_check_command_complete(struct qbman_swp *s, -+ const struct qbman_result *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. 
It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. */ -+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ /*When token is set it indicates that VDQ command has been fetched by qbman and -+ *is working on it. It is safe for software to issue another VDQ command, so -+ *incrementing the busy variable.*/ -+ if (s->vdq.storage == dq) { -+ s->vdq.storage = NULL; -+ atomic_inc(&s->vdq.busy); -+ } -+ return 1; -+} -+ -+/********************************/ -+/* Categorising qbman results */ -+/********************************/ -+ -+static struct qb_attr_code code_result_in_mem = -+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); -+ -+static inline int __qbman_result_is_x(const struct qbman_result *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); -+ -+ return (response_verb == x); -+} -+ -+static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); -+ -+ return (response_verb == x); -+} -+ -+int qbman_result_is_DQ(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); -+} -+ -+int qbman_result_is_FQDAN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); -+} -+ -+int qbman_result_is_CDAN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); -+} -+ -+int qbman_result_is_CSCN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || -+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); -+} -+ -+int qbman_result_is_BPSCN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); -+} -+ -+int qbman_result_is_CGCU(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); -+} -+ -+int qbman_result_is_FQRN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); -+} -+ -+int qbman_result_is_FQRNI(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); -+} -+ -+int qbman_result_is_FQPN(const struct qbman_result *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); -+} -+ -+/*********************************/ -+/* Parsing frame dequeue results */ -+/*********************************/ -+ -+/* These APIs assume qbman_result_is_DQ() is TRUE */ -+ -+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_stat, p); -+} -+ -+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); -+} -+ -+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); -+} -+ -+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_fqid, p); -+} -+ -+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ 
return qb_attr_code_decode(&code_dqrr_byte_count, p); -+} -+ -+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_frame_count, p); -+} -+ -+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq) -+{ -+ const uint64_t *p = (const uint64_t *)qb_cl(dq); -+ -+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); -+} -+ -+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (const struct qbman_fd *)&p[8]; -+} -+ -+/**************************************/ -+/* Parsing state-change notifications */ -+/**************************************/ -+ -+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_scn_state_in_mem = -+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); -+static struct qb_attr_code code_scn_rid_in_mem = -+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); -+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); -+ -+uint8_t qbman_result_SCN_state(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); -+} -+ -+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return qb_attr_code_decode(&code_scn_rid, p); -+} -+ -+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn) -+{ -+ const uint64_t *p = (const uint64_t *)qb_cl(scn); -+ -+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); -+} -+ -+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); -+} -+ -+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ uint32_t result_rid; -+ -+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); -+ return make_le24(result_rid); -+} -+ -+/*****************/ -+/* Parsing BPSCN */ -+/*****************/ -+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; -+} -+ -+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn) -+{ -+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); -+} -+ -+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); -+} -+ -+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); -+} -+ -+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn) -+{ -+ uint64_t ctx; -+ uint32_t ctx_hi, ctx_lo; -+ -+ ctx = qbman_result_SCN_ctx(scn); -+ ctx_hi = upper32(ctx); -+ ctx_lo = lower32(ctx); -+ return ((uint64_t)make_le32(ctx_hi) << 32 | -+ (uint64_t)make_le32(ctx_lo)); -+} -+ -+/*****************/ -+/* Parsing CGCU */ -+/*****************/ -+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; -+} -+ -+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn) -+{ -+ uint64_t ctx; -+ uint32_t ctx_hi, ctx_lo; -+ -+ ctx = qbman_result_SCN_ctx(scn); -+ ctx_hi = upper32(ctx); -+ ctx_lo = lower32(ctx); -+ return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) | -+ (uint64_t)make_le32(ctx_lo); -+} -+ -+/******************/ -+/* Buffer release */ -+/******************/ -+ 
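Before the implementation, a minimal caller-side sketch of how the release path below is typically driven. Assumptions, not part of the patch: a portal pointer obtained from qbman_swp_init(), a valid buffer-pool id, a header in scope that declares qbman_release_desc_* and qbman_swp_release(), and the illustrative helper name example_seed_pool.

/* Hypothetical usage sketch, not part of this patch: hand a small batch of
 * buffers back to a buffer pool through the software portal. */
static int example_seed_pool(struct qbman_swp *swp, uint32_t bpid,
			     const uint64_t *bufs, unsigned int n)
{
	struct qbman_release_desc d;
	int ret;

	/* A single release command carries at most 7 buffers (see the BUG_ON
	 * in qbman_swp_release() below). */
	if (!n || n > 7)
		return -EINVAL;

	qbman_release_desc_clear(&d);		/* zero the descriptor, set the SET_ME bit */
	qbman_release_desc_set_bpid(&d, bpid);	/* select the target buffer pool */

	/* A clear RAR_SUCCESS() only means the release command ring has no
	 * free slot at this instant, so a simple busy-retry on -EBUSY is
	 * sufficient for a sketch like this. */
	do {
		ret = qbman_swp_release(swp, &d, bufs, n);
	} while (ret == -EBUSY);

	return ret;
}

The same retry idiom applies to qbman_swp_acquire() further down, which likewise returns -EBUSY when the management command interface is momentarily occupied.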
-+/* These should be const, eventually */ -+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ -+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); -+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); -+ -+void qbman_release_desc_clear(struct qbman_release_desc *d) -+{ -+ uint32_t *cl; -+ -+ memset(d, 0, sizeof(*d)); -+ cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_set_me, cl, 1); -+} -+ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_bpid, cl, bpid); -+} -+ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); -+} -+ -+#define RAR_IDX(rar) ((rar) & 0x7) -+#define RAR_VB(rar) ((rar) & 0x80) -+#define RAR_SUCCESS(rar) ((rar) & 0x100) -+ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); -+ -+ pr_debug("RAR=%08x\n", rar); -+ if (!RAR_SUCCESS(rar)) -+ return -EBUSY; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ /* Start the release command */ -+ p = qbman_cena_write_start_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ /* Copy the caller's buffer pointers to the command */ -+ u64_to_le32_copy(&p[2], buffers, num_buffers); -+ /* Set the verb byte, have to substitute in the valid-bit and the number -+ * of buffers. */ -+ lwsync(); -+ p[0] = cl[0] | RAR_VB(rar) | num_buffers; -+ qbman_cena_write_complete_wo_shadow(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ return 0; -+} -+ -+/*******************/ -+/* Buffer acquires */ -+/*******************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); -+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); -+ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ uint32_t *p; -+ uint32_t rslt, num; -+ -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_acquire_bpid, p, bpid); -+ qb_attr_code_encode(&code_acquire_num, p, num_buffers); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ num = qb_attr_code_decode(&code_acquire_r_num, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_MC_ACQUIRE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", -+ bpid, rslt); -+ return -EIO; -+ } -+ BUG_ON(num > num_buffers); -+ /* Copy the acquired buffers to the caller's array */ -+ u64_from_le32_copy(buffers, &p[2], num); -+ return (int)num; -+} -+ -+/*****************/ -+/* FQ management */ -+/*****************/ -+ -+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); -+ -+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, -+ uint8_t alt_fq_verb) -+{ -+ 
uint32_t *p; -+ uint32_t rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", -+ fqid, alt_fq_verb, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); -+} -+ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); -+} -+ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); -+} -+ -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); -+} -+ -+/**********************/ -+/* Channel management */ -+/**********************/ -+ -+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); -+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); -+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); -+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); -+ -+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it -+ * would be irresponsible to expose it. */ -+#define CODE_CDAN_WE_EN 0x1 -+#define CODE_CDAN_WE_CTX 0x4 -+ -+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, -+ uint8_t we_mask, uint8_t cdan_en, -+ uint64_t ctx) -+{ -+ uint32_t *p; -+ uint32_t rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_cdan_cid, p, channelid); -+ qb_attr_code_encode(&code_cdan_we, p, we_mask); -+ qb_attr_code_encode(&code_cdan_en, p, cdan_en); -+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Decode the outcome */ -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) -+ != QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("CDAN cQID %d failed: code = 0x%02x\n", -+ channelid, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_CTX, -+ 0, ctx); -+} -+ -+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 1, 0); -+} -+ -+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 0, 0); -+} -+ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, -+ 1, ctx); -+} -+ -+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr) -+{ -+ return QBMAN_IDX_FROM_DQRR(dqrr); -+} -+ -+struct qbman_result 
*qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx) -+{ -+ struct qbman_result *dq; -+ -+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx)); -+ return dq; -+} -+ -+int qbman_swp_send_multiple(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ int frames_to_send) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqcr_ci; -+ uint8_t diff; -+ int sent = 0; -+ int i; -+ int initial_pi = s->eqcr.pi; -+ uint64_t start_pointer; -+ -+ if (!s->eqcr.available) { -+ eqcr_ci = s->eqcr.ci; -+ s->eqcr.ci = qbman_cena_read_reg(&s->sys, -+ QBMAN_CENA_SWP_EQCR_CI) & 0xF; -+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE, -+ eqcr_ci, s->eqcr.ci); -+ if (!diff) -+ goto done; -+ s->eqcr.available += diff; -+ } -+ -+ /* we are trying to send frames_to_send if we have enough space in the ring */ -+ while (s->eqcr.available && frames_to_send--) { -+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys, -+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7)); -+ /* Write command (except of first byte) and FD */ -+ memcpy(&p[1], &cl[1], 7*4); -+ memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd)); -+ -+ initial_pi++; -+ initial_pi &= 0xF; -+ s->eqcr.available--; -+ sent++; -+ } -+ -+ done: -+ initial_pi = s->eqcr.pi; -+ lwsync(); -+ -+ /* in order for flushes to complete faster */ -+ /*For that we use a following trick: we record all lines in 32 bit word */ -+ -+ initial_pi = s->eqcr.pi; -+ for (i = 0; i < sent; i++) { -+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys, -+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7)); -+ -+ p[0] = cl[0] | s->eqcr.pi_vb; -+ initial_pi++; -+ initial_pi &= 0xF; -+ -+ if (!(initial_pi & 7)) -+ s->eqcr.pi_vb ^= QB_VALID_BIT; -+ } -+ -+ initial_pi = s->eqcr.pi; -+ -+ /* We need to flush all the lines but without load/store operations between them */ -+ /* We assign start_pointer before we start loop so that in loop we do not read it from memory */ -+ start_pointer = (uint64_t)s->sys.addr_cena; -+ for (i = 0; i < sent; i++) { -+ p = (uint32_t *)(start_pointer + QBMAN_CENA_SWP_EQCR(initial_pi & 7)); -+ dcbf((uint64_t)p); -+ initial_pi++; -+ initial_pi &= 0xF; -+ } -+ -+ /* Update producer index for the next call */ -+ s->eqcr.pi = initial_pi; -+ -+ return sent; -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.h b/drivers/net/dpaa2/qbman/driver/qbman_portal.h -new file mode 100644 -index 0000000..17f1c53 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.h -@@ -0,0 +1,270 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_private.h" -+#include -+ -+uint32_t qman_version; -+/* All QBMan command and result structures use this "valid bit" encoding */ -+#define QB_VALID_BIT ((uint32_t)0x80) -+ -+/* Management command result codes */ -+#define QBMAN_MC_RSLT_OK 0xf0 -+ -+/* QBMan DQRR size is set at runtime in qbman_portal.c */ -+ -+#define QBMAN_EQCR_SIZE 8 -+ -+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last) -+{ -+ /* 'first' is included, 'last' is excluded */ -+ if (first <= last) -+ return last - first; -+ return (2 * ringsize) + last - first; -+} -+ -+/* --------------------- */ -+/* portal data structure */ -+/* --------------------- */ -+ -+struct qbman_swp { -+ const struct qbman_swp_desc *desc; -+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it -+ * needs in here. */ -+ struct qbman_swp_sys sys; -+ /* Management commands */ -+ struct { -+#ifdef QBMAN_CHECKING -+ enum swp_mc_check { -+ swp_mc_can_start, /* call __qbman_swp_mc_start() */ -+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ -+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ -+ } check; -+#endif -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ } mc; -+ /* Push dequeues */ -+ uint32_t sdq; -+ /* Volatile dequeues */ -+ struct { -+ /* VDQCR supports a "1 deep pipeline", meaning that if you know -+ * the last-submitted command is already executing in the -+ * hardware (as evidenced by at least 1 valid dequeue result), -+ * you can write another dequeue command to the register, the -+ * hardware will start executing it as soon as the -+ * already-executing command terminates. (This minimises latency -+ * and stalls.) With that in mind, this "busy" variable refers -+ * to whether or not a command can be submitted, not whether or -+ * not a previously-submitted command is still executing. In -+ * other words, once proof is seen that the previously-submitted -+ * command is executing, "vdq" is no longer "busy". */ -+ atomic_t busy; -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ /* We need to determine when vdq is no longer busy. This depends -+ * on whether the "busy" (last-submitted) dequeue command is -+ * targeting DQRR or main-memory, and detected is based on the -+ * presence of the dequeue command's "token" showing up in -+ * dequeue entries in DQRR or main-memory (respectively). */ -+ struct qbman_result *storage; /* NULL if DQRR */ -+ } vdq; -+ /* DQRR */ -+ struct { -+ uint32_t next_idx; -+ uint32_t valid_bit; -+ uint8_t dqrr_size; -+ int reset_bug; -+ } dqrr; -+ struct { -+ uint32_t pi; -+ uint32_t pi_vb; -+ uint32_t ci; -+ int available; -+ } eqcr; -+}; -+ -+/* -------------------------- */ -+/* portal management commands */ -+/* -------------------------- */ -+ -+/* Different management commands all use this common base layer of code to issue -+ * commands and poll for results. 
The first function returns a pointer to where -+ * the caller should fill in their MC command (though they should ignore the -+ * verb byte), the second function commits merges in the caller-supplied command -+ * verb (which should not include the valid-bit) and submits the command to -+ * hardware, and the third function checks for a completed response (returns -+ * non-NULL if only if the response is complete). */ -+void *qbman_swp_mc_start(struct qbman_swp *p); -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); -+void *qbman_swp_mc_result(struct qbman_swp *p); -+ -+/* Wraps up submit + poll-for-result */ -+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, -+ uint32_t cmd_verb) -+{ -+ int loopvar; -+ -+ qbman_swp_mc_submit(swp, cmd, cmd_verb); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ cmd = qbman_swp_mc_result(swp); -+ } while (!cmd); -+ return cmd; -+} -+ -+/* ------------ */ -+/* qb_attr_code */ -+/* ------------ */ -+ -+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which -+ * is either serving as a configuration command or a query result. The -+ * representation is inherently little-endian, as the indexing of the words is -+ * itself little-endian in nature and DPAA2 QBMan is little endian for anything -+ * that crosses a word boundary too (64-bit fields are the obvious examples). -+ */ -+struct qb_attr_code { -+ unsigned int word; /* which uint32_t[] array member encodes the field */ -+ unsigned int lsoffset; /* encoding offset from ls-bit */ -+ unsigned int width; /* encoding width. (bool must be 1.) */ -+}; -+ -+/* Some pre-defined codes */ -+extern struct qb_attr_code code_generic_verb; -+extern struct qb_attr_code code_generic_rslt; -+ -+/* Macros to define codes */ -+#define QB_CODE(a, b, c) { a, b, c} -+#define QB_CODE_NULL \ -+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) -+ -+/* Rotate a code "ms", meaning that it moves from less-significant bytes to -+ * more-significant, from less-significant words to more-significant, etc. The -+ * "ls" version does the inverse, from more-significant towards -+ * less-significant. -+ */ -+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ code->lsoffset += bits; -+ while (code->lsoffset > 31) { -+ code->word++; -+ code->lsoffset -= 32; -+ } -+} -+ -+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ /* Don't be fooled, this trick should work because the types are -+ * unsigned. So the case that interests the while loop (the rotate has -+ * gone too far and the word count needs to compensate for it), is -+ * manifested when lsoffset is negative. But that equates to a really -+ * large unsigned value, starting with lots of "F"s. As such, we can -+ * continue adding 32 back to it until it wraps back round above zero, -+ * to a value of 31 or less... -+ */ -+ code->lsoffset -= bits; -+ while (code->lsoffset > 31) { -+ code->word--; -+ code->lsoffset += 32; -+ } -+} -+ -+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). 
*/ -+#define qb_attr_code_for_ms(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ms(code, bits)) -+#define qb_attr_code_for_ls(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ls(code, bits)) -+ -+/* decode a field from a cacheline */ -+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, -+ const uint32_t *cacheline) -+{ -+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); -+} -+ -+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, -+ const uint64_t *cacheline) -+{ -+ return cacheline[code->word / 2]; -+} -+ -+/* encode a field to a cacheline */ -+static inline void qb_attr_code_encode(const struct qb_attr_code *code, -+ uint32_t *cacheline, uint32_t val) -+{ -+ cacheline[code->word] = -+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) -+ | e32_uint32_t(code->lsoffset, code->width, val); -+} -+ -+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code, -+ uint64_t *cacheline, uint64_t val) -+{ -+ cacheline[code->word / 2] = val; -+} -+ -+/* Small-width signed values (two's-complement) will decode into medium-width -+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to -+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value -+ * 249. Likewise -120 would decode as 136.) This function allows the caller to -+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit -+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). -+ */ -+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, -+ uint32_t val) -+{ -+ BUG_ON(val >= (1u << code->width)); -+ /* code->width should never exceed the width of val. If it does then a -+ * different function with larger val size must be used to translate -+ * from unsigned to signed */ -+ BUG_ON(code->width > sizeof(val) * CHAR_BIT); -+ /* If the high bit was set, it was encoding a negative */ -+ if (val >= 1u << (code->width - 1)) -+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - -+ val); -+ /* Otherwise, it was encoding a positive */ -+ return (int32_t)val; -+} -+ -+/* ---------------------- */ -+/* Descriptors/cachelines */ -+/* ---------------------- */ -+ -+/* To avoid needless dynamic allocation, the driver API often gives the caller -+ * a "descriptor" type that the caller can instantiate however they like. -+ * Ultimately though, it is just a cacheline of binary storage (or something -+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for -+ * holding pre-formatted pieces of hardware commands. The performance-critical -+ * code can then copy these descriptors directly into hardware command -+ * registers more efficiently than trying to construct/format commands -+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in -+ * order for the compiler to know its size, but the internal details are not -+ * exposed. The following macro is used within the driver for converting *any* -+ * descriptor pointer to a usable array pointer. The use of a macro (instead of -+ * an inline) is necessary to work with different descriptor types and to work -+ * correctly with const and non-const inputs (and similarly-qualified outputs). 
-+ */ -+#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_private.h b/drivers/net/dpaa2/qbman/driver/qbman_private.h -new file mode 100644 -index 0000000..624ede1 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_private.h -@@ -0,0 +1,168 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*/ -+ -+/* Perform extra checking */ -+#define QBMAN_CHECKING -+ -+/* To maximise the amount of logic that is common between the Linux driver and -+ * other targets (such as the embedded MC firmware), we pivot here between the -+ * inclusion of two platform-specific headers. -+ * -+ * The first, qbman_sys_decl.h, includes any and all required system headers as -+ * well as providing any definitions for the purposes of compatibility. The -+ * second, qbman_sys.h, is where platform-specific routines go. -+ * -+ * The point of the split is that the platform-independent code (including this -+ * header) may depend on platform-specific declarations, yet other -+ * platform-specific routines may depend on platform-independent definitions. -+ */ -+ -+#include "qbman_sys_decl.h" -+ -+/* When things go wrong, it is a convenient trick to insert a few FOO() -+ * statements in the code to trace progress. TODO: remove this once we are -+ * hacking the code less actively. -+ */ -+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) -+ -+/* Any time there is a register interface which we poll on, this provides a -+ * "break after x iterations" scheme for it. It's handy for debugging, eg. -+ * where you don't want millions of lines of log output from a polling loop -+ * that won't, because such things tend to drown out the earlier log output -+ * that might explain what caused the problem. (NB: put ";" after each macro!) -+ * TODO: we should probably remove this once we're done sanitising the -+ * simulator... 
-+ */ -+#define DBG_POLL_START(loopvar) (loopvar = 10) -+#define DBG_POLL_CHECK(loopvar) \ -+ do {if (!(loopvar--)) BUG_ON(NULL == "DBG_POLL_CHECK"); } while (0) -+ -+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets -+ * and widths, these macro-generated encode/decode/isolate/remove inlines can -+ * be used. -+ * -+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), -+ * where the field is located 3 bits "up" from the least-significant bit of the -+ * register (ie. the field location within the 32-bit register corresponds to a -+ * mask of 0x0001fff8), you would do; -+ * uint16_t field = d32_uint16_t(3, 14, reg_value); -+ * -+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, -+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" -+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the -+ * LS bit), do; -+ * reg_value |= e32_int(19, 1, !!field); -+ * -+ * If you wish to read-modify-write a register, such that you leave the 14-bit -+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit -+ * value using; -+ * reg_value = i32_uint16_t(3, 14, reg_value); -+ * -+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to -+ * zero) but leaving all other fields as-is; -+ * reg_val = r32_int(19, 1, reg_value); -+ * -+ */ -+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \ -+ (uint32_t)((1 << width) - 1)) -+#define DECLARE_CODEC32(t) \ -+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ -+} \ -+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ -+} \ -+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ -+} \ -+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ~(MAKE_MASK32(width) << lsoffset) & val; \ -+} -+DECLARE_CODEC32(uint32_t) -+DECLARE_CODEC32(uint16_t) -+DECLARE_CODEC32(uint8_t) -+DECLARE_CODEC32(int) -+ -+ /*********************/ -+ /* Debugging assists */ -+ /*********************/ -+ -+static inline void __hexdump(unsigned long start, unsigned long end, -+ unsigned long p, size_t sz, const unsigned char *c) -+{ -+ while (start < end) { -+ unsigned int pos = 0; -+ char buf[64]; -+ int nl = 0; -+ -+ pos += sprintf(buf + pos, "%08lx: ", start); -+ do { -+ if ((start < p) || (start >= (p + sz))) -+ pos += sprintf(buf + pos, ".."); -+ else -+ pos += sprintf(buf + pos, "%02x", *(c++)); -+ if (!(++start & 15)) { -+ buf[pos++] = '\n'; -+ nl = 1; -+ } else { -+ nl = 0; -+ if (!(start & 1)) -+ buf[pos++] = ' '; -+ if (!(start & 3)) -+ buf[pos++] = ' '; -+ } -+ } while (start & 15); -+ if (!nl) -+ buf[pos++] = '\n'; -+ buf[pos] = '\0'; -+ pr_info("%s", buf); -+ } -+} -+ -+static inline void hexdump(const void *ptr, size_t sz) -+{ -+ unsigned long p = (unsigned long)ptr; -+ unsigned long start = p & ~(unsigned long)15; -+ unsigned long end = (p + sz + 15) & ~(unsigned long)15; -+ const unsigned char *c = ptr; -+ -+ __hexdump(start, end, p, sz, c); -+} -+ -+#define QMAN_REV_4000 0x04000000 -+#define QMAN_REV_4100 0x04010000 -+#define 
QMAN_REV_4101 0x04010001 -+ -+#include "qbman_sys.h" -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys.h b/drivers/net/dpaa2/qbman/driver/qbman_sys.h -new file mode 100644 -index 0000000..a39fa71 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys.h -@@ -0,0 +1,373 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the -+ * driver. They are only included via qbman_private.h, which is itself a -+ * platform-independent file and is included by all the other driver source. -+ * -+ * qbman_sys_decl.h is included prior to all other declarations and logic, and -+ * it exists to provide compatibility with any linux interfaces our -+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file -+ * provides linux compatibility. -+ * -+ * This qbman_sys.h header, on the other hand, is included *after* any common -+ * and platform-neutral declarations and logic in qbman_private.h, and exists to -+ * implement any platform-specific logic of the qbman driver itself. Ie. it is -+ * *not* to provide linux compatibility. -+ */ -+ -+/* Trace the 3 different classes of read/write access to QBMan. #undef as -+ * required. */ -+#undef QBMAN_CCSR_TRACE -+#undef QBMAN_CINH_TRACE -+#undef QBMAN_CENA_TRACE -+ -+static inline void word_copy(void *d, const void *s, unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = s; -+ -+ while (cnt--) -+ *(dd++) = *(ss++); -+} -+ -+/* Currently, the CENA support code expects each 32-bit word to be written in -+ * host order, and these are converted to hardware (little-endian) order on -+ * command submission. However, 64-bit quantities are must be written (and read) -+ * as two 32-bit words with the least-significant word first, irrespective of -+ * host endianness. 
*/ -+static inline void u64_to_le32_copy(void *d, const uint64_t *s, -+ unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = (const uint32_t *)s; -+ -+ while (cnt--) { -+ /* TBD: the toolchain was choking on the use of 64-bit types up -+ * until recently so this works entirely with 32-bit variables. -+ * When 64-bit types become usable again, investigate better -+ * ways of doing this. */ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ *(dd++) = ss[1]; -+ *(dd++) = ss[0]; -+ ss += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+static inline void u64_from_le32_copy(uint64_t *d, const void *s, -+ unsigned int cnt) -+{ -+ const uint32_t *ss = s; -+ uint32_t *dd = (uint32_t *)d; -+ -+ while (cnt--) { -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+ dd[1] = *(ss++); -+ dd[0] = *(ss++); -+ dd += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+/* Convert a host-native 32bit value into little endian */ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+static inline uint32_t make_le32(uint32_t val) -+{ -+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | -+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); -+} -+ -+static inline uint32_t make_le24(uint32_t val) -+{ -+ return (((val & 0xff) << 16) | (val & 0xff00) | -+ ((val & 0xff0000) >> 16)); -+} -+#else -+#define make_le32(val) (val) -+#define make_le24(val) (val) -+#endif -+static inline void make_le32_n(uint32_t *val, unsigned int num) -+{ -+ while (num--) { -+ *val = make_le32(*val); -+ val++; -+ } -+} -+ -+ /******************/ -+ /* Portal access */ -+ /******************/ -+struct qbman_swp_sys { -+ /* On GPP, the sys support for qbman_swp is here. The CENA region isi -+ * not an mmap() of the real portal registers, but an allocated -+ * place-holder, because the actual writes/reads to/from the portal are -+ * marshalled from these allocated areas using QBMan's "MC access -+ * registers". CINH accesses are atomic so there's no need for a -+ * place-holder. */ -+ uint8_t *cena; -+ uint8_t __iomem *addr_cena; -+ uint8_t __iomem *addr_cinh; -+ uint32_t idx; -+ enum qbman_eqcr_mode eqcr_mode; -+}; -+ -+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal -+ * C is (ACCESS_CMD,12,1) - is inhibited? 
(0==CENA, 1==CINH) -+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index -+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) -+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) -+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) -+ */ -+ -+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, -+ uint32_t val) -+{ -+ __raw_writel(val, s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n", -+ s->addr_cinh, s->idx, offset, val); -+#endif -+} -+ -+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t reg = __raw_readl(s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n", -+ s->addr_cinh, s->idx, offset, reg); -+#endif -+ return reg; -+} -+ -+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ void *shadow = s->cena + offset; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ BUG_ON(offset & 63); -+ dcbz(shadow); -+ return shadow; -+} -+ -+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+#endif -+ BUG_ON(offset & 63); -+ return (s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, -+ uint32_t offset, void *cmd) -+{ -+ const uint32_t *shadow = cmd; -+ int loop; -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+ hexdump(cmd, 64); -+#endif -+ for (loop = 15; loop >= 1; loop--) -+ __raw_writel(shadow[loop], s->addr_cena + -+ offset + loop * 4); -+ lwsync(); -+ __raw_writel(shadow[0], s->addr_cena + offset); -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+ hexdump(cmd, 64); -+#endif -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ return __raw_readl(s->addr_cena + offset); -+} -+ -+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t *shadow = (uint32_t *)(s->cena + offset); -+ unsigned int loop; -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ -+ for (loop = 0; loop < 16; loop++) -+ shadow[loop] = __raw_readl(s->addr_cena + offset -+ + loop * 4); -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return shadow; -+} -+ -+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n", -+ s->addr_cena, s->idx, offset, shadow); -+#endif -+ -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return s->addr_cena + offset; -+} -+ -+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dccivac(s->addr_cena + offset); -+} -+ -+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dccivac(s->addr_cena + offset); -+ prefetch_for_load(s->addr_cena 
+ offset); -+} -+ -+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ prefetch_for_load(s->addr_cena + offset); -+} -+ -+ /******************/ -+ /* Portal support */ -+ /******************/ -+ -+/* The SWP_CFG portal register is special, in that it is used by the -+ * platform-specific code rather than the platform-independent code in -+ * qbman_portal.c. So use of it is declared locally here. */ -+#define QBMAN_CINH_SWP_CFG 0xd00 -+ -+/* For MC portal use, we always configure with -+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) -+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2) -+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) -+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) -+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2) -+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE) -+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) -+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE) -+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) -+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE) -+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE) -+ */ -+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, -+ uint8_t est, uint8_t rpm, uint8_t dcm, -+ uint8_t epm, int sd, int sp, int se, -+ int dp, int de, int ep) -+{ -+ uint32_t reg; -+ -+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | -+ e32_uint8_t(16, 3, est) | -+ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) | -+ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) | -+ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) | -+ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn); -+ return reg; -+} -+ -+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, -+ const struct qbman_swp_desc *d, -+ uint8_t dqrr_size) -+{ -+ uint32_t reg; -+ -+ s->addr_cena = d->cena_bar; -+ s->addr_cinh = d->cinh_bar; -+ s->idx = (uint32_t)d->idx; -+ s->cena = (void *)get_zeroed_page(GFP_KERNEL); -+ if (!s->cena) { -+ pr_err("Could not allocate page for cena shadow\n"); -+ return -1; -+ } -+ s->eqcr_mode = d->eqcr_mode; -+ BUG_ON(d->idx < 0); -+#ifdef QBMAN_CHECKING -+ /* We should never be asked to initialise for a portal that isn't in -+ * the power-on state. (Ie. don't forget to reset portals when they are -+ * decommissioned!) 
-+ */ -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ BUG_ON(reg); -+#endif -+ if (s->eqcr_mode == qman_eqcr_vb_array) -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1, -+ 1, 1); -+ else -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1, -+ 1, 1); -+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ if (!reg) { -+ pr_err("The portal %d is not enabled!\n", s->idx); -+ kfree(s->cena); -+ return -1; -+ } -+ return 0; -+} -+ -+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) -+{ -+ free_page((unsigned long)s->cena); -+} -+ -+static inline void *qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n", -+ s->addr_cena, s->idx, offset); -+#endif -+ BUG_ON(offset & 63); -+ return (s->addr_cena + offset); -+} -diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h -new file mode 100644 -index 0000000..bbf3627 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h -@@ -0,0 +1,69 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+ -+/* Sanity check */ -+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \ -+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__) -+#error "Unknown endianness!" -+#endif -+ -+/* The platform-independent code shouldn't need endianness, except for -+ * weird/fast-path cases like qbman_result_has_token(), which needs to -+ * perform a passive and endianness-specific test on a read-only data structure -+ * very quickly. It's an exception, and this symbol is used for that case. 
*/ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+#define DQRR_TOK_OFFSET 0 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 -+#define SCN_STATE_OFFSET_IN_MEM 8 -+#define SCN_RID_OFFSET_IN_MEM 8 -+#else -+#define DQRR_TOK_OFFSET 24 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 -+#define SCN_STATE_OFFSET_IN_MEM 16 -+#define SCN_RID_OFFSET_IN_MEM 0 -+#endif -+ -+/* Similarly-named functions */ -+#define upper32(a) upper_32_bits(a) -+#define lower32(a) lower_32_bits(a) -+ -+ /****************/ -+ /* arch assists */ -+ /****************/ -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); } -+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); } -+static inline void prefetch_for_load(void *p) -+{ -+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); -+} -+ -+static inline void prefetch_for_store(void *p) -+{ -+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); -+} -diff --git a/drivers/net/dpaa2/qbman/include/compat.h b/drivers/net/dpaa2/qbman/include/compat.h -new file mode 100644 -index 0000000..456f938 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/compat.h -@@ -0,0 +1,637 @@ -+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef HEADER_COMPAT_H -+#define HEADER_COMPAT_H -+ -+#include -+ -+#ifndef _GNU_SOURCE -+#define _GNU_SOURCE -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* The following definitions are primarily to allow the single-source driver -+ * interfaces to be included by arbitrary program code. Ie. for interfaces that -+ * are also available in kernel-space, these definitions provide compatibility -+ * with certain attributes and types used in those interfaces. 
*/ -+ -+/* Required compiler attributes */ -+#define __maybe_unused __attribute__((unused)) -+#define __always_unused __attribute__((unused)) -+#define __packed __attribute__((__packed__)) -+#define __user -+#define likely(x) __builtin_expect(!!(x), 1) -+#define unlikely(x) __builtin_expect(!!(x), 0) -+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) -+#undef container_of -+#define container_of(ptr, type, member) ({ \ -+ typeof(((type *)0)->member)(*__mptr) = (ptr); \ -+ (type *)((char *)__mptr - offsetof(type, member)); }) -+#define __stringify_1(x) #x -+#define __stringify(x) __stringify_1(x) -+#define panic(x) \ -+do { \ -+ printf("panic: %s", x); \ -+ abort(); \ -+} while (0) -+ -+#ifdef ARRAY_SIZE -+#undef ARRAY_SIZE -+#endif -+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) -+ -+/* Required types */ -+typedef uint8_t u8; -+typedef uint16_t u16; -+typedef uint32_t u32; -+typedef uint64_t u64; -+typedef uint64_t dma_addr_t; -+typedef cpu_set_t cpumask_t; -+#define spinlock_t pthread_mutex_t -+typedef u32 compat_uptr_t; -+static inline void __user *compat_ptr(compat_uptr_t uptr) -+{ -+ return (void __user *)(unsigned long)uptr; -+} -+ -+static inline compat_uptr_t ptr_to_compat(void __user *uptr) -+{ -+ return (u32)(unsigned long)uptr; -+} -+ -+/* I/O operations */ -+static inline u32 in_be32(volatile void *__p) -+{ -+ volatile u32 *p = __p; -+ return *p; -+} -+ -+static inline void out_be32(volatile void *__p, u32 val) -+{ -+ volatile u32 *p = __p; -+ *p = val; -+} -+ -+/* Debugging */ -+#define prflush(fmt, args...) \ -+ do { \ -+ printf(fmt, ##args); \ -+ fflush(stdout); \ -+ } while (0) -+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args) -+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args) -+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args) -+#define pr_info(fmt, args...) prflush(fmt, ##args) -+ -+#define BUG() abort() -+#ifdef CONFIG_BUGON -+#ifdef pr_debug -+#undef pr_debug -+#endif -+#define pr_debug(fmt, args...) printf(fmt, ##args) -+#define BUG_ON(c) \ -+do { \ -+ if (c) { \ -+ pr_crit("BUG: %s:%d\n", __FILE__, __LINE__); \ -+ abort(); \ -+ } \ -+} while (0) -+#define might_sleep_if(c) BUG_ON(c) -+#define msleep(x) \ -+do { \ -+ pr_crit("BUG: illegal call %s:%d\n", __FILE__, __LINE__); \ -+ exit(EXIT_FAILURE); \ -+} while (0) -+#else -+#ifdef pr_debug -+#undef pr_debug -+#endif -+#define pr_debug(fmt, args...) 
do { ; } while (0) -+#define BUG_ON(c) do { ; } while (0) -+#define might_sleep_if(c) do { ; } while (0) -+#define msleep(x) do { ; } while (0) -+#endif -+#define WARN_ON(c, str) \ -+do { \ -+ static int warned_##__LINE__; \ -+ if ((c) && !warned_##__LINE__) { \ -+ pr_warn("%s\n", str); \ -+ pr_warn("(%s:%d)\n", __FILE__, __LINE__); \ -+ warned_##__LINE__ = 1; \ -+ } \ -+} while (0) -+ -+#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1)) -+ -+/****************/ -+/* Linked-lists */ -+/****************/ -+ -+struct list_head { -+ struct list_head *prev; -+ struct list_head *next; -+}; -+ -+#define LIST_HEAD(n) \ -+struct list_head n = { \ -+ .prev = &n, \ -+ .next = &n \ -+} -+ -+#define INIT_LIST_HEAD(p) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ __p298->prev = __p298->next = __p298; \ -+} while (0) -+#define list_entry(node, type, member) \ -+ (type *)((void *)node - offsetof(type, member)) -+#define list_empty(p) \ -+({ \ -+ const struct list_head *__p298 = (p); \ -+ ((__p298->next == __p298) && (__p298->prev == __p298)); \ -+}) -+#define list_add(p, l) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ struct list_head *__l298 = (l); \ -+ __p298->next = __l298->next; \ -+ __p298->prev = __l298; \ -+ __l298->next->prev = __p298; \ -+ __l298->next = __p298; \ -+} while (0) -+#define list_add_tail(p, l) \ -+do { \ -+ struct list_head *__p298 = (p); \ -+ struct list_head *__l298 = (l); \ -+ __p298->prev = __l298->prev; \ -+ __p298->next = __l298; \ -+ __l298->prev->next = __p298; \ -+ __l298->prev = __p298; \ -+} while (0) -+#define list_for_each(i, l) \ -+ for (i = (l)->next; i != (l); i = i->next) -+#define list_for_each_safe(i, j, l) \ -+ for (i = (l)->next, j = i->next; i != (l); \ -+ i = j, j = i->next) -+#define list_for_each_entry(i, l, name) \ -+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \ -+ i = list_entry(i->name.next, typeof(*i), name)) -+#define list_for_each_entry_safe(i, j, l, name) \ -+ for (i = list_entry((l)->next, typeof(*i), name), \ -+ j = list_entry(i->name.next, typeof(*j), name); \ -+ &i->name != (l); \ -+ i = j, j = list_entry(j->name.next, typeof(*j), name)) -+#define list_del(i) \ -+do { \ -+ (i)->next->prev = (i)->prev; \ -+ (i)->prev->next = (i)->next; \ -+} while (0) -+ -+/* Other miscellaneous interfaces our APIs depend on; */ -+ -+#define lower_32_bits(x) ((u32)(x)) -+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16)) -+ -+/* Compiler/type stuff */ -+typedef unsigned int gfp_t; -+typedef uint32_t phandle; -+ -+#define noinline __attribute__((noinline)) -+#define __iomem -+#define EINTR 4 -+#define ENODEV 19 -+#define MODULE_AUTHOR(s) -+#define MODULE_LICENSE(s) -+#define MODULE_DESCRIPTION(s) -+#define MODULE_PARM_DESC(x, y) -+#define EXPORT_SYMBOL(x) -+#define module_init(fn) int m_##fn(void) { return fn(); } -+#define module_exit(fn) void m_##fn(void) { fn(); } -+#define module_param(x, y, z) -+#define module_param_string(w, x, y, z) -+#define GFP_KERNEL 0 -+#define __KERNEL__ -+#define __init -+#define __raw_readb(p) *(const volatile unsigned char *)(p) -+#define __raw_readl(p) *(const volatile unsigned int *)(p) -+#define __raw_writel(v, p) \ -+do { \ -+ *(volatile unsigned int *)(p) = (v); \ -+} while (0) -+ -+/* printk() stuff */ -+#define printk(fmt, args...) do_not_use_printk -+#define nada(fmt, args...) 
do { ; } while (0) -+ -+/* Interrupt stuff */ -+typedef uint32_t irqreturn_t; -+#define IRQ_HANDLED 0 -+ -+/* memcpy() stuff - when you know alignments in advance */ -+#ifdef CONFIG_TRY_BETTER_MEMCPY -+static inline void copy_words(void *dest, const void *src, size_t sz) -+{ -+ u32 *__dest = dest; -+ const u32 *__src = src; -+ size_t __sz = sz >> 2; -+ -+ BUG_ON((unsigned long)dest & 0x3); -+ BUG_ON((unsigned long)src & 0x3); -+ BUG_ON(sz & 0x3); -+ while (__sz--) -+ *(__dest++) = *(__src++); -+} -+ -+static inline void copy_shorts(void *dest, const void *src, size_t sz) -+{ -+ u16 *__dest = dest; -+ const u16 *__src = src; -+ size_t __sz = sz >> 1; -+ -+ BUG_ON((unsigned long)dest & 0x1); -+ BUG_ON((unsigned long)src & 0x1); -+ BUG_ON(sz & 0x1); -+ while (__sz--) -+ *(__dest++) = *(__src++); -+} -+ -+static inline void copy_bytes(void *dest, const void *src, size_t sz) -+{ -+ u8 *__dest = dest; -+ const u8 *__src = src; -+ -+ while (sz--) -+ *(__dest++) = *(__src++); -+} -+#else -+#define copy_words memcpy -+#define copy_shorts memcpy -+#define copy_bytes memcpy -+#endif -+ -+/* Spinlock stuff */ -+#define spinlock_t pthread_mutex_t -+#define __SPIN_LOCK_UNLOCKED(x) PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+#define spin_lock_init(x) \ -+ do { \ -+ __maybe_unused int __foo; \ -+ pthread_mutexattr_t __foo_attr; \ -+ __foo = pthread_mutexattr_init(&__foo_attr); \ -+ BUG_ON(__foo); \ -+ __foo = pthread_mutexattr_settype(&__foo_attr, \ -+ PTHREAD_MUTEX_ADAPTIVE_NP); \ -+ BUG_ON(__foo); \ -+ __foo = pthread_mutex_init(x, &__foo_attr); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_lock(x) \ -+ do { \ -+ __maybe_unused int __foo = pthread_mutex_lock(x); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_unlock(x) \ -+ do { \ -+ __maybe_unused int __foo = pthread_mutex_unlock(x); \ -+ BUG_ON(__foo); \ -+ } while (0) -+#define spin_lock_irq(x) do { \ -+ local_irq_disable(); \ -+ spin_lock(x); \ -+ } while (0) -+#define spin_unlock_irq(x) do { \ -+ spin_unlock(x); \ -+ local_irq_enable(); \ -+ } while (0) -+#define spin_lock_irqsave(x, f) do { spin_lock_irq(x); } while (0) -+#define spin_unlock_irqrestore(x, f) do { spin_unlock_irq(x); } while (0) -+ -+#define raw_spinlock_t spinlock_t -+#define raw_spin_lock_init(x) spin_lock_init(x) -+#define raw_spin_lock_irqsave(x, f) spin_lock(x) -+#define raw_spin_unlock_irqrestore(x, f) spin_unlock(x) -+ -+/* Completion stuff */ -+#define DECLARE_COMPLETION(n) int n = 0; -+#define complete(n) \ -+do { \ -+ *n = 1; \ -+} while (0) -+#define wait_for_completion(n) \ -+do { \ -+ while (!*n) { \ -+ bman_poll(); \ -+ qman_poll(); \ -+ } \ -+ *n = 0; \ -+} while (0) -+ -+/* Platform device stuff */ -+struct platform_device { void *dev; }; -+static inline struct -+platform_device *platform_device_alloc(const char *name __always_unused, -+ int id __always_unused) -+{ -+ struct platform_device *ret = malloc(sizeof(*ret)); -+ -+ if (ret) -+ ret->dev = NULL; -+ return ret; -+} -+ -+#define platform_device_add(pdev) 0 -+#define platform_device_del(pdev) do { ; } while (0) -+static inline void platform_device_put(struct platform_device *pdev) -+{ -+ free(pdev); -+} -+ -+struct resource { -+ int unused; -+}; -+ -+/* Allocator stuff */ -+#define kmalloc(sz, t) malloc(sz) -+#define vmalloc(sz) malloc(sz) -+#define kfree(p) do { if (p) free(p); } while (0) -+static inline void *kzalloc(size_t sz, gfp_t __foo __always_unused) -+{ -+ void *ptr = malloc(sz); -+ -+ if (ptr) -+ memset(ptr, 0, sz); -+ return 
ptr; -+} -+ -+static inline unsigned long get_zeroed_page(gfp_t __foo __always_unused) -+{ -+ void *p; -+ -+ if (posix_memalign(&p, 4096, 4096)) -+ return 0; -+ memset(p, 0, 4096); -+ return (unsigned long)p; -+} -+ -+static inline void free_page(unsigned long p) -+{ -+ free((void *)p); -+} -+ -+struct kmem_cache { -+ size_t sz; -+ size_t align; -+}; -+ -+#define SLAB_HWCACHE_ALIGN 0 -+static inline struct kmem_cache *kmem_cache_create(const char *n __always_unused, -+ size_t sz, size_t align, unsigned long flags __always_unused, -+ void (*c)(void *) __always_unused) -+{ -+ struct kmem_cache *ret = malloc(sizeof(*ret)); -+ -+ if (ret) { -+ ret->sz = sz; -+ ret->align = align; -+ } -+ return ret; -+} -+ -+static inline void kmem_cache_destroy(struct kmem_cache *c) -+{ -+ free(c); -+} -+ -+static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t f __always_unused) -+{ -+ void *p; -+ -+ if (posix_memalign(&p, c->align, c->sz)) -+ return NULL; -+ return p; -+} -+ -+static inline void kmem_cache_free(struct kmem_cache *c __always_unused, void *p) -+{ -+ free(p); -+} -+ -+static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t f) -+{ -+ void *ret = kmem_cache_alloc(c, f); -+ -+ if (ret) -+ memset(ret, 0, c->sz); -+ return ret; -+} -+ -+/* Bitfield stuff. */ -+#define BITS_PER_ULONG (sizeof(unsigned long) << 3) -+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6) -+#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1))) -+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG) -+static inline unsigned long test_bits(unsigned long mask, -+ volatile unsigned long *p) -+{ -+ return *p & mask; -+} -+ -+static inline int test_bit(int idx, volatile unsigned long *bits) -+{ -+ return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+ -+static inline void set_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p |= mask; -+} -+ -+static inline void set_bit(int idx, volatile unsigned long *bits) -+{ -+ set_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+ -+static inline void clear_bits(unsigned long mask, volatile unsigned long *p) -+{ -+ *p &= ~mask; -+} -+ -+static inline void clear_bit(int idx, volatile unsigned long *bits) -+{ -+ clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx)); -+} -+ -+static inline unsigned long test_and_set_bits(unsigned long mask, -+ volatile unsigned long *p) -+{ -+ unsigned long ret = test_bits(mask, p); -+ -+ set_bits(mask, p); -+ return ret; -+} -+ -+static inline int test_and_set_bit(int idx, volatile unsigned long *bits) -+{ -+ int ret = test_bit(idx, bits); -+ -+ set_bit(idx, bits); -+ return ret; -+} -+ -+static inline int test_and_clear_bit(int idx, volatile unsigned long *bits) -+{ -+ int ret = test_bit(idx, bits); -+ -+ clear_bit(idx, bits); -+ return ret; -+} -+ -+static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx) -+{ -+ while ((++idx < limit) && test_bit(idx, bits)) -+ ; -+ return idx; -+} -+ -+static inline int find_first_zero_bit(unsigned long *bits, int limit) -+{ -+ int idx = 0; -+ -+ while (test_bit(idx, bits) && (++idx < limit)) -+ ; -+ return idx; -+} -+ -+static inline u64 div64_u64(u64 n, u64 d) -+{ -+ return n / d; -+} -+ -+#define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); } -+#define smp_mb() dmb(ish) -+ -+/* Atomic stuff */ -+typedef struct { -+ int counter; -+} atomic_t; -+ -+#define atomic_read(v) (*(volatile int *)&(v)->counter) -+#define atomic_set(v, i) (((v)->counter) = (i)) -+static inline void atomic_add(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ 
int result; -+ -+ asm volatile("// atomic_add\n" -+ "1: ldxr %w0, %2\n" -+ " add %w0, %w0, %w3\n" -+ " stxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i)); -+} -+ -+static inline int atomic_add_return(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_add_return\n" -+ "1: ldxr %w0, %2\n" -+ " add %w0, %w0, %w3\n" -+ " stlxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i) -+ : "memory"); -+ -+ smp_mb(); -+ return result; -+} -+ -+static inline void atomic_sub(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_sub\n" -+ "1: ldxr %w0, %2\n" -+ " sub %w0, %w0, %w3\n" -+ " stxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i)); -+} -+ -+static inline int atomic_sub_return(int i, atomic_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ asm volatile("// atomic_sub_return\n" -+ "1: ldxr %w0, %2\n" -+ " sub %w0, %w0, %w3\n" -+ " stlxr %w1, %w0, %2\n" -+ " cbnz %w1, 1b" -+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) -+ : "Ir" (i) -+ : "memory"); -+ -+ smp_mb(); -+ return result; -+} -+ -+#define atomic_inc(v) atomic_add(1, v) -+#define atomic_dec(v) atomic_sub(1, v) -+ -+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -+#define atomic_inc_return(v) (atomic_add_return(1, v)) -+#define atomic_dec_return(v) (atomic_sub_return(1, v)) -+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -+ -+#endif /* HEADER_COMPAT_H */ -diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h -new file mode 100644 -index 0000000..4cb784c ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h -@@ -0,0 +1,151 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_QBMAN_BASE_H -+#define _FSL_QBMAN_BASE_H -+ -+/** -+ * DOC: QBMan basic structures -+ * -+ * The QBMan block descriptor, software portal descriptor and Frame descriptor -+ * are defined here. -+ * -+ */ -+ -+/** -+ * struct qbman_block_desc - qbman block descriptor structure -+ * @ccsr_reg_bar: CCSR register map. -+ * @irq_rerr: Recoverable error interrupt line. -+ * @irq_nrerr: Non-recoverable error interrupt line -+ * -+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not -+ * control this QBMan instance, these values may simply be place-holders. The -+ * idea is simply that we be able to distinguish between them, eg. so that SWP -+ * descriptors can identify which QBMan instance they belong to. -+ */ -+struct qbman_block_desc { -+ void *ccsr_reg_bar; -+ int irq_rerr; -+ int irq_nrerr; -+}; -+ -+enum qbman_eqcr_mode { -+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */ -+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */ -+}; -+ -+/** -+ * struct qbman_swp_desc - qbman software portal descriptor structure -+ * @block: The QBMan instance. -+ * @cena_bar: Cache-enabled portal register map. -+ * @cinh_bar: Cache-inhibited portal register map. -+ * @irq: -1 if unused (or unassigned) -+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user. -+ * @qman_version: the qman version. -+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and -+ * valid bit array mode are supported. -+ * -+ * Descriptor for a QBMan software portal, expressed in terms that make sense to -+ * the user context. Ie. on MC, this information is likely to be true-physical, -+ * and instantiated statically at compile-time. On GPP, this information is -+ * likely to be obtained via "discovery" over a partition's "MC bus" -+ * (ie. in response to a MC portal command), and would take into account any -+ * virtualisation of the GPP user's address space and/or interrupt numbering. -+ */ -+struct qbman_swp_desc { -+ const struct qbman_block_desc *block; -+ uint8_t *cena_bar; -+ uint8_t *cinh_bar; -+ int irq; -+ int idx; -+ uint32_t qman_version; -+ enum qbman_eqcr_mode eqcr_mode; -+}; -+ -+/* Driver object for managing a QBMan portal */ -+struct qbman_swp; -+ -+/** -+ * struct qbman_fd - basci structure for qbman frame descriptor -+ * @words: for easier/faster copying the whole FD structure. -+ * @addr_lo: the lower 32 bits of the address in FD. -+ * @addr_hi: the upper 32 bits of the address in FD. -+ * @len: the length field in FD. -+ * @bpid_offset: represent the bpid and offset fields in FD. offset in -+ * the MS 16 bits, BPID in the LS 16 bits. -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * Place-holder for FDs, we represent it via the simplest form that we need for -+ * now. Different overlays may be needed to support different options, etc. (It -+ * is impractical to define One True Struct, because the resulting encoding -+ * routines (lots of read-modify-writes) would be worst-case performance whether -+ * or not circumstances required them.) -+ * -+ * Note, as with all data-structures exchanged between software and hardware (be -+ * they located in the portal register map or DMA'd to and from main-memory), -+ * the driver ensures that the caller of the driver API sees the data-structures -+ * in host-endianness. "struct qbman_fd" is no exception. 
The 32-bit words -+ * contained within this structure are represented in host-endianness, even if -+ * hardware always treats them as little-endian. As such, if any of these fields -+ * are interpreted in a binary (rather than numerical) fashion by hardware -+ * blocks (eg. accelerators), then the user should be careful. We illustrate -+ * with an example; -+ * -+ * Suppose the desired behaviour of an accelerator is controlled by the "frc" -+ * field of the FDs that are sent to it. Suppose also that the behaviour desired -+ * by the user corresponds to an "frc" value which is expressed as the literal -+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit -+ * value in which 0xfe is the first byte and 0xba is the last byte, and as -+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If -+ * the software is little-endian also, this can simply be achieved by setting -+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set -+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is -+ * to treat the 32-bit words as numerical values, in which the offset of a field -+ * from the beginning of the first byte (as required or generated by hardware) -+ * is numerically encoded by a left-shift (ie. by raising the field to a -+ * corresponding power of 2). Ie. in the current example, software could set -+ * "frc" in the following way, and it would work correctly on both little-endian -+ * and big-endian operation; -+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); -+ */ -+struct qbman_fd { -+ union { -+ uint32_t words[8]; -+ struct qbman_fd_simple { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t len; -+ uint32_t bpid_offset; -+ uint32_t frc; -+ uint32_t ctrl; -+ uint32_t flc_lo; -+ uint32_t flc_hi; -+ } simple; -+ }; -+}; -+ -+#endif /* !_FSL_QBMAN_BASE_H */ -diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h -new file mode 100644 -index 0000000..9e8e5f2 ---- /dev/null -+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h -@@ -0,0 +1,1087 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_PORTAL_H -+#define _FSL_QBMAN_PORTAL_H -+ -+#include -+ -+/** -+ * DOC - QBMan portal APIs to implement the following functions: -+ * - Initialize and destroy Software portal object. -+ * - Read and write Software portal interrupt registers. -+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue -+ * command etc. -+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command, -+ * parsing the dequeue response in DQRR and memeory, parsing the state change -+ * notifications etc. -+ * - Release, including setting the release descriptor, and issuing the buffer -+ * release command. -+ * - Acquire, acquire the buffer from the given buffer pool. -+ * - FQ management. -+ * - Channel management, enable/disable CDAN with or without context. -+ */ -+ -+/** -+ * qbman_swp_init() - Create a functional object representing the given -+ * QBMan portal descriptor. -+ * @d: the given qbman swp descriptor -+ * -+ * Return qbman_swp portal object for success, NULL if the object cannot -+ * be created. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); -+ -+/** -+ * qbman_swp_finish() - Create and destroy a functional object representing -+ * the given QBMan portal descriptor. -+ * @p: the qbman_swp object to be destroyed. -+ * -+ */ -+void qbman_swp_finish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_get_desc() - Get the descriptor of the given portal object. -+ * @p: the given portal object. -+ * -+ * Return the descriptor for this portal. -+ */ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *); -+ -+ /**************/ -+ /* Interrupts */ -+ /**************/ -+ -+/* EQCR ring interrupt */ -+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) -+/* Enqueue command dispatched interrupt */ -+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) -+/* DQRR non-empty interrupt */ -+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) -+/* RCR ring interrupt */ -+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) -+/* Release command dispatched interrupt */ -+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) -+/* Volatile dequeue command interrupt */ -+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) -+ -+/** -+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_ISDR register. -+ */ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IDSR register. -+ */ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_read_status() - Get the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. 
-+ * -+ * Return the settings in SWP_ISR register. -+ */ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_clear_status() - Set the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_ISR register. -+ */ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_IER register. -+ */ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IER register. -+ */ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. -+ * -+ * Return the settings in SWP_IIR register. -+ */ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. -+ * @mask: The value to set in SWP_IIR register. -+ */ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); -+ -+ /************/ -+ /* Dequeues */ -+ /************/ -+ -+/** -+ * struct qbman_result - structure for qbman dequeue response and/or -+ * notification. -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * possible qbman dequeue result. -+ */ -+struct qbman_result { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/* TODO: -+ *A DQRI interrupt can be generated when there are dequeue results on the -+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to -+ * user-supplied 'storage' addresses). There are two parameters to this -+ * interrupt source, one is a threshold and the other is a timeout. The -+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or -+ * if the ring has been non-empty for been longer than 'timeout' nanoseconds. -+ * For timeout, an approximation to the desired nanosecond-granularity value is -+ * made, so there are get and set APIs to allow the user to see what actual -+ * timeout is set (compared to the timeout that was requested). */ -+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh); -+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout); -+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout); -+ -+/* ------------------- */ -+/* Push-mode dequeuing */ -+/* ------------------- */ -+ -+/* The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. -+ */ -+ -+/** -+ * qbman_swp_push_get() - Get the push dequeue setup. -+ * @s: the software portal object. -+ * @channel_idx: the channel index to query. -+ * @enabled: returned boolean to show whether the push dequeue is enabled for -+ * the given channel. 
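A minimal usage sketch of the interrupt registers above; the function names, the include path and the handler structure are assumptions for illustration only:

	#include <stdint.h>
	#include "fsl_qbman_portal.h"	/* assumed include path for the header above */

	/* Arm the DQRR-non-empty interrupt and make sure interrupts are not inhibited. */
	static void example_arm_dqri(struct qbman_swp *p)
	{
		qbman_swp_interrupt_set_trigger(p, QBMAN_SWP_INTERRUPT_DQRI);
		qbman_swp_interrupt_set_inhibit(p, 0);
	}

	/* Interrupt service sketch: read the status, handle DQRI, then acknowledge
	 * the handled bits via the clear_status API above. */
	static void example_portal_isr(struct qbman_swp *p)
	{
		uint32_t status = qbman_swp_interrupt_read_status(p);

		if (status & QBMAN_SWP_INTERRUPT_DQRI) {
			/* ... drain the DQRR here ... */
		}
		qbman_swp_interrupt_clear_status(p, status);
	}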
-+ */ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled); -+ -+/** -+ * qbman_swp_push_set() - Enable or disable push dequeue. -+ * @s: the software portal object. -+ * @channel_idx: the channel index.. -+ * @enable: enable or disable push dequeue. -+ * -+ * The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. -+ */ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable); -+ -+/* ------------------- */ -+/* Pull-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * struct qbman_pull_desc - the structure for pull dequeue descriptor -+ * @dont_manipulate_directly: the 6 32bit data to represent the whole -+ * possible settings for pull dequeue descriptor. -+ */ -+struct qbman_pull_desc { -+ uint32_t dont_manipulate_directly[6]; -+}; -+ -+enum qbman_pull_type_e { -+ /* dequeue with priority precedence, respect intra-class scheduling */ -+ qbman_pull_type_prio = 1, -+ /* dequeue with active FQ precedence, respect ICS */ -+ qbman_pull_type_active, -+ /* dequeue with active FQ precedence, no ICS */ -+ qbman_pull_type_active_noics -+}; -+ -+/** -+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the pull dequeue descriptor to be cleared. -+ */ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d); -+ -+/** -+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage -+ * @d: the pull dequeue descriptor to be set. -+ * @storage: the pointer of the memory to store the dequeue result. -+ * @storage_phys: the physical address of the storage memory. -+ * @stash: to indicate whether write allocate is enabled. -+ * -+ * If not called, or if called with 'storage' as NULL, the result pull dequeues -+ * will produce results to DQRR. If 'storage' is non-NULL, then results are -+ * produced to the given memory location (using the physical/DMA address which -+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not -+ * those writes to main-memory express a cache-warming attribute. -+ */ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct qbman_result *storage, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. -+ * @d: the pull dequeue descriptor to be set. -+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. -+ */ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, -+ uint8_t numframes); -+/** -+ * qbman_pull_desc_set_token() - Set dequeue token for pull command -+ * @d: the dequeue descriptor -+ * @token: the token to be set -+ * -+ * token is the value that shows up in the dequeue response that can be used to -+ * detect when the results have been published. The easiest technique is to zero -+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value -+ */ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token); -+ -+/* Exactly one of the following descriptor "actions" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) 
-+ * - pull dequeue from the given frame queue (FQ) -+ * - pull dequeue from any FQ in the given work queue (WQ) -+ * - pull dequeue from any FQ in any WQ in the given channel -+ */ -+/** -+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. -+ * @fqid: the frame queue index of the given FQ. -+ */ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid); -+ -+/** -+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. -+ * @wqid: composed of channel id and wqid within the channel. -+ * @dct: the dequeue command type. -+ */ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct); -+ -+/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command -+ * dequeues. -+ * @chid: the channel id to be dequeued. -+ * @dct: the dequeue command type. -+ */ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct); -+ -+/** -+ * qbman_swp_pull() - Issue the pull dequeue command -+ * @s: the software portal object. -+ * @d: the software portal descriptor which has been configured with -+ * the set of qbman_pull_desc_set_*() calls. -+ * -+ * Return 0 for success, and -EBUSY if the software portal is not ready -+ * to do pull dequeue. -+ */ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d); -+ -+/* -------------------------------- */ -+/* Polling DQRR for dequeue results */ -+/* -------------------------------- */ -+ -+/** -+ * qbman_swp_dqrr_next() - Get an valid DQRR entry. -+ * @s: the software portal object. -+ * -+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. -+ */ -+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *); -+ -+/** -+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from -+ * qbman_swp_dqrr_next(). -+ * @s: the software portal object. -+ * @dq: the DQRR entry to be consumed. -+ */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq); -+ -+/** -+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr -+ * @dqrr: the given dqrr object. -+ * -+ * Return dqrr index. -+ */ -+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr); -+ -+/** -+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the -+ * given portal -+ * @s: the given portal. -+ * @idx: the dqrr index. -+ * -+ * Return dqrr entry object. -+ */ -+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx); -+ -+/* ------------------------------------------------- */ -+/* Polling user-provided storage for dequeue results */ -+/* ------------------------------------------------- */ -+ -+/** -+ * qbman_result_has_new_result() - Check and get the dequeue response from the -+ * dq storage memory set in pull dequeue command -+ * @s: the software portal object. -+ * @dq: the dequeue result read from the memory. -+ * -+ * Only used for user-provided storage of dequeue results, not DQRR. For -+ * efficiency purposes, the driver will perform any required endianness -+ * conversion to ensure that the user's dequeue result storage is in host-endian -+ * format (whether or not that is the same as the little-endian format that -+ * hardware DMA'd to the user's storage). 
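A minimal usage sketch of the pull-mode interface; the function and variable names are illustrative, the storage is assumed to be DMA-able memory whose physical address the caller already knows, and dma_addr_t is the typedef from the compat header above:

	#include <stdint.h>
	#include <string.h>
	#include "fsl_qbman_portal.h"	/* assumed include path for the header above */

	static int example_pull_one(struct qbman_swp *s, uint32_t fqid,
				    struct qbman_result *storage,
				    dma_addr_t storage_phys)
	{
		struct qbman_pull_desc pd;
		int ret;

		/* Zero the storage and use a non-zero token, as suggested above. */
		memset(storage, 0, sizeof(*storage));

		qbman_pull_desc_clear(&pd);
		qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
		qbman_pull_desc_set_numframes(&pd, 1);
		qbman_pull_desc_set_token(&pd, 1);
		qbman_pull_desc_set_fq(&pd, fqid);

		ret = qbman_swp_pull(s, &pd);
		if (ret)
			return ret;	/* -EBUSY: portal not ready for a pull */

		/* Busy-poll until the result has been published to 'storage'. */
		while (!qbman_result_has_new_result(s, storage))
			;
		return 0;
	}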
As such, once the user has called -+ * qbman_result_has_new_result() and been returned a valid dequeue result, -+ * they should not call it again on the same memory location (except of course -+ * if another dequeue command has been executed to produce a new result to that -+ * location). -+ * -+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid -+ * dequeue result. -+ */ -+int qbman_result_has_new_result(struct qbman_swp *s, -+ const struct qbman_result *dq); -+ -+/* -------------------------------------------------------- */ -+/* Parsing dequeue entries (DQRR and user-provided storage) */ -+/* -------------------------------------------------------- */ -+ -+/** -+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not -+ * @dq: the dequeue result to be checked. -+ * -+ * DQRR entries may contain non-dequeue results, ie. notifications -+ */ -+int qbman_result_is_DQ(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_SCN() - Check the dequeue result is notification or not -+ * @dq: the dequeue result to be checked. -+ * -+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change -+ * notifications" of one type or another. Some APIs apply to all of them, of the -+ * form qbman_result_SCN_***(). -+ */ -+static inline int qbman_result_is_SCN(const struct qbman_result *dq) -+{ -+ return !qbman_result_is_DQ(dq); -+} -+ -+/* Recognise different notification types, only required if the user allows for -+ * these to occur, and cares about them when they do. -+ */ -+ -+/** -+ * qbman_result_is_FQDAN() - Check for FQ Data Availability -+ * @dq: the qbman_result object. -+ * -+ * Return 1 if this is FQDAN. -+ */ -+int qbman_result_is_FQDAN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CDAN() - Check for Channel Data Availability -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CDAN. -+ */ -+int qbman_result_is_CDAN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CSCN() - Check for Congestion State Change -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CSCN. -+ */ -+int qbman_result_is_CSCN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is BPSCN. -+ */ -+int qbman_result_is_BPSCN(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is CGCU. -+ */ -+int qbman_result_is_CGCU(const struct qbman_result *dq); -+ -+/* Frame queue state change notifications; (FQDAN in theory counts too as it -+ * leaves a FQ parked, but it is primarily a data availability notification) -+ */ -+ -+/** -+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification. -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQRN. -+ */ -+int qbman_result_is_FQRN(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQRNI. -+ */ -+int qbman_result_is_FQRNI(const struct qbman_result *); -+ -+/** -+ * qbman_result_is_FQPN() - Check for FQ Park Notification -+ * @dq: the qbman_result object to check. -+ * -+ * Return 1 if this is FQPN. 
-+ */ -+int qbman_result_is_FQPN(const struct qbman_result *dq); -+ -+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE) -+ */ -+/* FQ empty */ -+#define QBMAN_DQ_STAT_FQEMPTY 0x80 -+/* FQ held active */ -+#define QBMAN_DQ_STAT_HELDACTIVE 0x40 -+/* FQ force eligible */ -+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20 -+/* Valid frame */ -+#define QBMAN_DQ_STAT_VALIDFRAME 0x10 -+/* FQ ODP enable */ -+#define QBMAN_DQ_STAT_ODPVALID 0x04 -+/* Volatile dequeue */ -+#define QBMAN_DQ_STAT_VOLATILE 0x02 -+/* volatile dequeue command is expired */ -+#define QBMAN_DQ_STAT_EXPIRED 0x01 -+ -+/** -+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the state field. -+ */ -+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull -+ * command. -+ * @dq: the dequeue result. -+ * -+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. -+ */ -+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq) -+{ -+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE); -+} -+ -+/** -+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is -+ * completed. -+ * @dq: the dequeue result. -+ * -+ * Return boolean. -+ */ -+static inline int qbman_result_DQ_is_pull_complete( -+ const struct qbman_result *dq) -+{ -+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED); -+} -+ -+/** -+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response -+ * seqnum is valid only if VALIDFRAME flag is TRUE -+ * @dq: the dequeue result. -+ * -+ * Return seqnum. -+ */ -+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response -+ * odpid is valid only if ODPVAILD flag is TRUE. -+ * @dq: the dequeue result. -+ * -+ * Return odpid. -+ */ -+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return fqid. -+ */ -+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the byte count remaining in the FQ. -+ */ -+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame count remaining in the FQ. -+ */ -+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame queue context. -+ */ -+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq); -+ -+/** -+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame descriptor. -+ */ -+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq); -+ -+/* State-change notifications (FQDAN/CDAN/CSCN/...). */ -+ -+/** -+ * qbman_result_SCN_state() - Get the state field in State-change notification -+ * @scn: the state change notification. -+ * -+ * Return the state in the notifiation. 
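Tying the DQRR calls and the parsing helpers together, a sketch of a ring-draining loop (names are illustrative):

	#include <stdint.h>
	#include "fsl_qbman_portal.h"	/* assumed include path for the header above */

	static void example_drain_dqrr(struct qbman_swp *s)
	{
		const struct qbman_result *dq;

		while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
			if (qbman_result_is_DQ(dq)) {
				if (qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VALIDFRAME) {
					const struct qbman_fd *fd = qbman_result_DQ_fd(dq);
					uint32_t fqid = qbman_result_DQ_fqid(dq);

					/* ... hand 'fd' dequeued from 'fqid' to the application ... */
					(void)fd;
					(void)fqid;
				}
			} else {
				/* Non-frame entry: a state-change notification (FQDAN/CDAN/CSCN/...). */
			}
			qbman_swp_dqrr_consume(s, dq);
		}
	}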
-+ */ -+uint8_t qbman_result_SCN_state(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_rid() - Get the resource id from the notification -+ * @scn: the state change notification. -+ * -+ * Return the resource id. -+ */ -+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_ctx() - get the context from the notification -+ * @scn: the state change notification. -+ * -+ * Return the context. -+ */ -+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_state_in_mem() - Get the state in notification written -+ * in memory -+ * @scn: the state change notification. -+ * -+ * Return the state. -+ */ -+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written -+ * in memory. -+ * @scn: the state change notification. -+ * -+ * Return the resource id. -+ */ -+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn); -+ -+/* Type-specific "resource IDs". Mainly for illustration purposes, though it -+ * also gives the appropriate type widths. -+ */ -+/* Get the FQID from the FQDAN */ -+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQRN */ -+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQRNI */ -+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the FQID from the FQPN */ -+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq) -+/* Get the channel ID from the CDAN */ -+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+/* Get the CGID from the CSCN */ -+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+ -+/** -+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN -+ * @scn: the state change notification. -+ * -+ * Return the buffer pool id. -+ */ -+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free -+ * buffers in the pool from BPSCN. -+ * @scn: the state change notification. -+ * -+ * Return the number of free buffers. -+ */ -+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the -+ * buffer pool is depleted. -+ * @scn: the state change notification. -+ * -+ * Return the status of buffer pool depletion. -+ */ -+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer -+ * pool is surplus or not. -+ * @scn: the state change notification. -+ * -+ * Return the status of buffer pool surplus. -+ */ -+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message -+ * @scn: the state change notification. -+ * -+ * Return the BPSCN context. -+ */ -+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn); -+ -+/* Parsing CGCU */ -+/** -+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid -+ * @scn: the state change notification. -+ * -+ * Return the CGCU resource id. -+ */ -+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn); -+ -+/** -+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU -+ * @scn: the state change notification. -+ * -+ * Return instantaneous count in the CGCU notification. 
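-+ *
-+ * Illustrative sketch (log_congestion() is a hypothetical application helper,
-+ * not declared in this header):
-+ *
-+ *   if (qbman_result_is_CGCU(dq))
-+ *           log_congestion(qbman_result_cgcu_cgid(dq),
-+ *                          qbman_result_cgcu_icnt(dq));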
-+ */
-+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
-+
-+ /************/
-+ /* Enqueues */
-+ /************/
-+
-+/**
-+ * struct qbman_eq_desc - structure of enqueue descriptor
-+ * @dont_manipulate_directly: the 8 32-bit words that represent the whole
-+ * possible qbman enqueue setting in the enqueue descriptor.
-+ */
-+struct qbman_eq_desc {
-+ uint32_t dont_manipulate_directly[8];
-+};
-+
-+/**
-+ * struct qbman_eq_response - structure of enqueue response
-+ * @dont_manipulate_directly: the 16 32-bit words that represent the whole
-+ * enqueue response.
-+ */
-+struct qbman_eq_response {
-+ uint32_t dont_manipulate_directly[16];
-+};
-+
-+/**
-+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ * @d: the given enqueue descriptor.
-+ */
-+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
-+
-+/* Exactly one of the following descriptor "actions" should be set. (Calling
-+ * any one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue without order-restoration
-+ * - enqueue with order-restoration
-+ * - fill a hole in the order-restoration sequence, without any enqueue
-+ * - advance NESN (Next Expected Sequence Number), without any enqueue
-+ * 'respond_success' indicates whether an enqueue response should be DMA'd
-+ * after success (otherwise a response is DMA'd only after failure).
-+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
-+ * be enqueued.
-+ */
-+
-+/**
-+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ */
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
-+/**
-+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ * @incomplete: indicates that other fragments using the same sequence number
-+ * are yet to be enqueued.
-+ */
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+ uint32_t opr_id, uint32_t seqnum, int incomplete);
-+
-+/**
-+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum);
-+
-+/**
-+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum);
-+/**
-+ * qbman_eq_desc_set_response() - Set the enqueue response info.
-+ * @d: the enqueue descriptor
-+ * @storage_phys: the physical address of the enqueue response in memory.
-+ * @stash: indicate whether write allocation is enabled or not.
-+ *
-+ * In the case where an enqueue response is DMA'd, this determines where that
-+ * response should go. (The physical/DMA address is given for hardware's
-+ * benefit, but software should interpret it as a "struct qbman_eq_response"
-+ * data structure.) 'stash' controls whether or not the write to main-memory
-+ * expresses a cache-warming attribute.
-+ */
-+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
-+ dma_addr_t storage_phys,
-+ int stash);
-+
-+/**
-+ * qbman_eq_desc_set_token() - Set token for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @token: the token to be set.
-+ *
-+ * token is the value that shows up in an enqueue response that can be used to
-+ * detect when the results have been published. The easiest technique is to zero
-+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
-+ * value.
-+ */
-+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
-+
-+/**
-+ * Exactly one of the following descriptor "targets" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue to a frame queue
-+ * - enqueue to a queuing destination
-+ * Note that none of these will have any effect if the "action" type has been
-+ * set to "orp_hole" or "orp_nesn".
-+ */
-+/**
-+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @fqid: the id of the frame queue to be enqueued.
-+ */
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
-+
-+/**
-+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
-+ * @d: the enqueue descriptor
-+ * @qdid: the id of the queuing destination to be enqueued.
-+ * @qd_bin: the queuing destination bin
-+ * @qd_prio: the queuing destination priority.
-+ */
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-+ uint32_t qd_bin, uint32_t qd_prio);
-+
-+/**
-+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
-+ * @d: the enqueue descriptor
-+ * @enable: boolean to enable/disable EQDI
-+ *
-+ * Determines whether or not the portal's EQDI interrupt source should be
-+ * asserted after the enqueue command is completed.
-+ */
-+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
-+
-+/**
-+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
-+ * @d: the enqueue descriptor.
-+ * @enable: enable/disable DCA mode.
-+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
-+ * @park: determine whether to park the FQ or not
-+ *
-+ * Determines whether or not a portal DQRR entry should be consumed once the
-+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
-+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
-+ * being rescheduled.)
-+ */
-+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
-+ uint32_t dqrr_idx, int park);
-+
-+/**
-+ * qbman_swp_enqueue() - Issue an enqueue command.
-+ * @s: the software portal used for enqueue.
-+ * @d: the enqueue descriptor.
-+ * @fd: the frame descriptor to be enqueued.
-+ *
-+ * Please note that 'fd' should only be NULL if the "action" of the
-+ * descriptor is "orp_hole" or "orp_nesn".
-+ *
-+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
-+ */
-+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd);
-+
-+/* TODO:
-+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
-+ * @s: the software portal.
-+ * @thresh: the threshold to trigger the EQRI interrupt.
-+ *
-+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
-+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
-+ */
-+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
-+
-+ /*******************/
-+ /* Buffer releases */
-+ /*******************/
-+/**
-+ * struct qbman_release_desc - The structure for buffer release descriptor
-+ * @dont_manipulate_directly: the 32-bit data to represent the whole
-+ * possible settings of the qbman release descriptor.
-+ */
-+struct qbman_release_desc {
-+ uint32_t dont_manipulate_directly[1];
-+};
-+
-+/**
-+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ * @d: the qbman release descriptor.
-+ */
-+void qbman_release_desc_clear(struct qbman_release_desc *d);
-+
-+/**
-+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
-+ * @d: the qbman release descriptor.
-+ * @bpid: the id of the buffer pool.
-+ */
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
-+
-+/**
-+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
-+ * interrupt source should be asserted after the release command is completed.
-+ * @d: the qbman release descriptor.
-+ * @enable: enable/disable RCDI.
-+ */
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-+
-+/**
-+ * qbman_swp_release() - Issue a buffer release command.
-+ * @s: the software portal object.
-+ * @d: the release descriptor.
-+ * @buffers: a pointer to the buffer addresses to be released.
-+ * @num_buffers: number of buffers to be released, must be less than 8.
-+ *
-+ * Return 0 for success, -EBUSY if the release command ring is not ready.
-+ */
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+ const uint64_t *buffers, unsigned int num_buffers);
-+
-+/* TODO:
-+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
-+ * @s: the software portal.
-+ * @thresh: the threshold.
-+ * An RCRI interrupt can be generated when the fill-level of RCR falls below
-+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
-+ */
-+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
-+
-+ /*******************/
-+ /* Buffer acquires */
-+ /*******************/
-+/**
-+ * qbman_swp_acquire() - Issue a buffer acquire command.
-+ * @s: the software portal object.
-+ * @bpid: the buffer pool index.
-+ * @buffers: a pointer to the acquired buffer address(es).
-+ * @num_buffers: number of buffers to be acquired, must be less than 8.
-+ *
-+ * Return the number of buffers acquired (the acquire is all-or-nothing), or a
-+ * value of zero or less if the acquire command fails.
-+ */
-+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
-+ unsigned int num_buffers);
-+
-+ /*****************/
-+ /* FQ management */
-+ /*****************/
-+/**
-+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be scheduled.
-+ *
-+ * There are a couple of different ways that a FQ can end up in the parked
-+ * state; this schedules it.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be forced.
-+ *
-+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
-+ * and thus be available for selection by any channel-dequeuing behaviour (push
-+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
-+ * empty at the time this happens, the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * These functions change the FQ flow-control state between XON and XOFF. (The
-+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
-+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
-+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
-+ * changed to XOFF after it had already become truly-scheduled to a channel, and
-+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
-+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
-+ * return NULL.)
-+ */
-+/**
-+ * qbman_swp_fq_xon() - XON the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-+/**
-+ * qbman_swp_fq_xoff() - XOFF the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-+
-+ /**********************/
-+ /* Channel management */
-+ /**********************/
-+
-+/**
-+ * If the user has been allocated a channel object that is going to generate
-+ * CDANs to another channel, then these functions will be necessary.
-+ * CDAN-enabled channels only generate a single CDAN notification, after which
-+ * they need to be re-enabled before they'll generate another. (The idea is
-+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
-+ * re-enable step.) Each function generates a distinct command to hardware, so a
-+ * combination function is provided if the user wishes to modify the "context"
-+ * (which shows up in each CDAN message) each time they re-enable, as a single
-+ * command to hardware.
-+ */
-+
-+/**
-+ * qbman_swp_CDAN_set_context() - Set CDAN context
-+ * @s: the software portal object.
-+ * @channelid: the channel index.
-+ * @ctx: the context to be set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-+ uint64_t ctx);
-+
-+/**
-+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ * @ctx: the context set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx); -+int qbman_swp_fill_ring(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ uint8_t burst_index); -+int qbman_swp_flush_ring(struct qbman_swp *s); -+void qbman_sync(void); -+int qbman_swp_send_multiple(struct qbman_swp *s, -+ const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd, -+ int frames_to_send); -+ -+int qbman_check_command_complete(struct qbman_swp *s, -+ const struct qbman_result *dq); -+#endif /* !_FSL_QBMAN_PORTAL_H */ -diff --git a/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h -new file mode 100644 -index 0000000..bd5d4d5 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h -@@ -0,0 +1,330 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef _RTE_ETH_DPAA2_PVT_H_ -+#define _RTE_ETH_DPAA2_PVT_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+typedef uint64_t dma_addr_t; -+ -+#define FALSE 0 -+#define TRUE 1 -+#ifndef false -+#define false FALSE -+#endif -+#ifndef true -+#define true TRUE -+#endif -+#define lower_32_bits(x) ((uint32_t)(x)) -+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16)) -+ -+#ifndef ETH_ADDR_LEN -+#define ETH_ADDR_LEN 6 -+#endif -+#ifndef ETH_VLAN_HLEN -+#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */ -+#endif -+ -+#define NUM_MAX_RECV_FRAMES 16 -+ -+#define MC_PORTAL_INDEX 0 -+#define NUM_DPIO_REGIONS 2 -+#define NUM_DQS_PER_QUEUE 2 -+#define MC_PORTALS_BASE_PADDR 0x00080C000000ULL -+#define MC_PORTAL_STRIDE 0x10000 -+#define MC_PORTAL_SIZE 64 -+#define MC_PORTAL_ID_TO_PADDR(portal_id) \ -+(MC_PORTALS_BASE_PADDR + (portal_id) * MC_PORTAL_STRIDE) -+ -+struct dpaa2_dpio_dev { -+ TAILQ_ENTRY(dpaa2_dpio_dev) next; /**< Pointer to Next device instance */ -+ uint16_t index; /**< Index of a instance in the list */ -+ rte_atomic16_t ref_count; /**< How many thread contexts are sharing this.*/ -+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */ -+ uint16_t token; -+ struct qbman_swp *sw_portal; /** SW portal object */ -+ const struct qbman_result *dqrr[4]; /**< DQRR Entry for this SW portal */ -+ pthread_mutex_t lock; /** Required when Portal is shared */ -+ void *mc_portal; /**< MC Portal for configuring this device */ -+ uintptr_t qbman_portal_ce_paddr; /**< Physical address of Cache Enabled Area */ -+ uintptr_t ce_size; /**< Size of the CE region */ -+ uintptr_t qbman_portal_ci_paddr; /**< Physical address of Cache Inhibit Area */ -+ uintptr_t ci_size; /**< Size of the CI region */ -+ void *intr_handle; -+ int32_t vfio_fd; /**< File descriptor received via VFIO */ -+ int32_t hw_id; /**< An unique ID of this DPIO device instance */ -+}; -+ -+struct queue_storage_info_t { -+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE]; -+ struct qbman_result *active_dqs; -+ int toggle; -+}; -+ -+struct thread_io_info_t { -+ struct dpaa2_dpio_dev *dpio_dev; -+ struct dpaa2_dpio_dev *sec_dpio_dev; -+ struct qbman_result *global_active_dqs; -+}; -+ -+/*! Global per thread DPIO portal */ -+extern __thread struct thread_io_info_t thread_io_info; -+/*! 
Global MCP list */ -+extern void *(*mcp_ptr_list); -+ -+/* Refer to Table 7-3 in SEC BG */ -+struct qbman_fle { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t length; -+ /* FMT must be 00, MSB is final bit */ -+ uint32_t fin_bpid_offset; -+ uint32_t frc; -+ uint32_t reserved[3]; /* Not used currently */ -+}; -+ -+/* Maximum release/acquire from QBMAN */ -+#define DPAA2_MBUF_MAX_ACQ_REL 7 -+ -+#define MAX_BPID 256 -+ -+/*Macros to define operations on FD*/ -+#define DPAA2_SET_FD_ADDR(fd, addr) \ -+ fd->simple.addr_lo = lower_32_bits((uint64_t)addr); \ -+ fd->simple.addr_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_SET_FD_LEN(fd, length) fd->simple.len = length -+#define DPAA2_SET_FD_BPID(fd, bpid) fd->simple.bpid_offset |= bpid; -+#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000)) -+#define DPAA2_SET_FD_OFFSET(fd, offset) (fd->simple.bpid_offset |= (uint32_t)(offset) << 16); -+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) fd->simple.frc = (0x80000000 | (len)); -+#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc; -+#define DPAA2_RESET_FD_CTRL(fd) fd->simple.ctrl = 0; -+ -+#define DPAA2_SET_FD_ASAL(fd, asal) (fd->simple.ctrl |= (asal << 16)) -+#define DPAA2_SET_FD_FLC(fd, addr) \ -+ fd->simple.flc_lo = lower_32_bits((uint64_t)addr); \ -+ fd->simple.flc_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) fle->frc = (0x80000000 | (len)); -+#define DPAA2_GET_FLE_ADDR(fle) \ -+ (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo) -+#define DPAA2_SET_FLE_ADDR(fle, addr) \ -+ fle->addr_lo = lower_32_bits((uint64_t)addr); \ -+ fle->addr_hi = upper_32_bits((uint64_t)addr); -+#define DPAA2_SET_FLE_OFFSET(fle, offset) (fle)->fin_bpid_offset |= (uint32_t)(offset) << 16; -+#define DPAA2_SET_FLE_BPID(fle, bpid) (fle)->fin_bpid_offset |= (uint64_t)bpid; -+#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff) -+#define DPAA2_SET_FLE_FIN(fle) fle->fin_bpid_offset |= (uint64_t)1 << 31; -+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000)) -+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \ -+ fd->simple.bpid_offset |= (uint32_t)1 << 28; -+#define DPAA2_GET_FD_ADDR(fd) \ -+ (uint64_t)((((uint64_t)(fd->simple.addr_hi)) << 32) + fd->simple.addr_lo) -+#define DPAA2_GET_FD_LEN(fd) (fd->simple.len) -+#define DPAA2_GET_FD_BPID(fd) ((fd->simple.bpid_offset & 0x00003FFF)) -+#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14) -+#define DPAA2_GET_FD_OFFSET(fd) ((fd->simple.bpid_offset & 0x0FFF0000) >> 16) -+#define DPAA2_GET_FD_FRC(fd) (fd->simple.frc) -+#define DPAA2_GET_FD_FLC(fd) \ -+ (uint64_t)((((uint64_t)(fd->simple.flc_hi)) << 32) + fd->simple.flc_lo) -+ -+#define DPAA2_SET_FLE_SG_EXT(fle) fle->fin_bpid_offset |= (uint64_t)1 << 29; -+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \ -+ (fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 
1 : 0 -+ -+#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \ -+ ((struct rte_mbuf *)((uint64_t)buf - meta_data_size)) -+#define DPAA2_BUF_FROM_INLINE_MBUF(mbuf, meta_data_size) \ -+ ((uint8_t *)((uint64_t)mbuf + meta_data_size)) -+ -+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64) -+ -+/*Macros to define QBMAN enqueue options */ -+#define DPAA2_ETH_EQ_DISABLE 0 /*!< Dont Enqueue the Frame */ -+#define DPAA2_ETH_EQ_RESP_ON_SUCC 1 /*!< Enqueue the Frame with -+ response after success*/ -+#define DPAA2_ETH_EQ_RESP_ON_FAIL 2 /*!< Enqueue the Frame with -+ response after failure*/ -+#define DPAA2_ETH_EQ_NO_RESP 3 /*!< Enqueue the Frame without -+ response*/ -+/* Only Enqueue Error responses will be -+ * pushed on FQID_ERR of Enqueue FQ */ -+#define DPAA2_EQ_RESP_ERR_FQ 0 -+/* All Enqueue responses will be pushed on address -+ * set with qbman_eq_desc_set_response */ -+#define DPAA2_EQ_RESP_ALWAYS 1 -+ -+#define DPAA2_MAX_BUF_POOLS 8 -+ -+struct dpbp_node { -+ struct dpbp_node *next; -+ struct fsl_mc_io dpbp; -+ uint16_t token; -+ int dpbp_id; -+}; -+ -+struct buf_pool_cfg { -+ void *addr; /*!< The address from where DPAA2 will carve out the -+ * buffers. 'addr' should be 'NULL' if user wants -+ * to create buffers from the memory which user -+ * asked DPAA2 to reserve during 'nadk init' */ -+ phys_addr_t phys_addr; /*!< corresponding physical address -+ * of the memory provided in addr */ -+ uint32_t num; /*!< number of buffers */ -+ uint32_t size; /*!< size of each buffer. 'size' should include -+ * any headroom to be reserved and alignment */ -+ uint16_t align; /*!< Buffer alignment (in bytes) */ -+ uint16_t bpid; /*!< The buffer pool id. This will be filled -+ *in by DPAA2 for each buffer pool */ -+}; -+ -+struct buf_pool { -+ uint32_t size; -+ uint32_t num_bufs; -+ uint16_t bpid; -+ uint8_t *h_bpool_mem; -+ struct rte_mempool *mp; -+ struct dpbp_node *dpbp_node; -+}; -+ -+/*! -+ * Buffer pool list configuration structure. User need to give DPAA2 the -+ * valid number of 'num_buf_pools'. 
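-+ *
-+ * A minimal, hypothetical initialisation of the per-pool configuration
-+ * (all values below are illustrative placeholders only):
-+ *
-+ *   struct buf_pool_cfg cfg = {
-+ *           .addr = NULL,   // let DPAA2 carve the buffers out itself
-+ *           .num = 2048,    // number of buffers
-+ *           .size = 2048,   // per-buffer size, including headroom
-+ *           .align = 64,    // buffer alignment in bytes
-+ *   };                      // .bpid is filled in by DPAA2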
-+ */ -+struct dpaa2_bp_list_cfg { -+ struct buf_pool_cfg buf_pool; /* Configuration -+ * of each buffer pool */ -+}; -+ -+struct dpaa2_bp_list { -+ struct dpaa2_bp_list *next; -+ struct rte_mempool *mp; -+ struct buf_pool buf_pool; -+}; -+ -+struct bp_info { -+ uint32_t meta_data_size; -+ uint32_t bpid; -+ struct dpaa2_bp_list *bp_list; -+}; -+ -+#define mempool_to_bpinfo(mp) ((struct bp_info *)mp->pool_data) -+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid) -+ -+extern struct dpaa2_bp_list *h_bp_list; -+ -+/* todo - this is costly, need to write a fast coversion routine */ -+static void *dpaa2_mem_ptov(phys_addr_t paddr) -+{ -+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); -+ int i; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { -+ if (paddr >= memseg[i].phys_addr && -+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len) -+ return (void *)(memseg[i].addr_64 + (paddr - memseg[i].phys_addr)); -+ } -+ return NULL; -+} -+ -+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) -+{ -+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); -+ int i; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { -+ if (vaddr >= memseg[i].addr_64 && -+ vaddr < memseg[i].addr_64 + memseg[i].len) -+ return memseg[i].phys_addr + (vaddr - memseg[i].addr_64); -+ } -+ return (phys_addr_t)(NULL); -+} -+ -+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -+/* -+ * When we are using Physical addresses as IO Virtual Addresses, -+ * we call conversion routines nadk_mem_vtop & nadk_mem_ptov wherever required. -+ * These routines are called with help of below MACRO's -+ */ -+ -+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_physaddr) -+#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr) -+ -+/** -+ * macro to convert Virtual address to IOVA -+ */ -+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr)) -+ -+/** -+ * macro to convert IOVA to Virtual address -+ */ -+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova)) -+ -+/** -+ * macro to convert modify the memory containing Virtual address to IOVA -+ */ -+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) \ -+ {_mem = (_type)(dpaa2_mem_vtop((uint64_t)(_mem))); } -+ -+/** -+ * macro to convert modify the memory containing IOVA to Virtual address -+ */ -+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \ -+ {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); } -+ -+#else -+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_addr) -+#define DPAA2_OP_VADDR_TO_IOVA(op) (op) -+ -+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr) -+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova) -+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) -+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) -+#endif -+ -+/* Function definitions for Mempool operations */ -+int hw_mbuf_init(struct rte_mempool *mp, void *_m); -+int hw_mbuf_free_bulk(struct rte_mempool *pool, void * const *obj_table, -+ unsigned n); -+int hw_mbuf_alloc_bulk(struct rte_mempool *pool, void **obj_table, -+ unsigned count); -+int hw_mbuf_create_pool(struct rte_mempool *mp); -+unsigned hw_mbuf_get_count(const struct rte_mempool *mp); -+ -+#endif -diff --git a/drivers/net/dpaa2/rte_eth_dpbp.c b/drivers/net/dpaa2/rte_eth_dpbp.c -new file mode 100644 -index 0000000..a4d29c9 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpbp.c -@@ -0,0 +1,377 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * All rights reserved. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "rte_memzone.h" -+ -+#include "rte_eth_dpaa2_pvt.h" -+#include "fsl_qbman_portal.h" -+#include -+ -+#include -+#include "dpaa2_logs.h" -+ -+static struct dpbp_node *g_dpbp_list; -+static struct dpbp_node *avail_dpbp; -+ -+struct bp_info bpid_info[MAX_BPID]; -+ -+struct dpaa2_bp_list *h_bp_list; -+ -+int -+dpaa2_create_dpbp_device( -+ int dpbp_id) -+{ -+ struct dpbp_node *dpbp_node; -+ int ret; -+ -+ /* Allocate DPAA2 dpbp handle */ -+ dpbp_node = (struct dpbp_node *)malloc(sizeof(struct dpbp_node)); -+ if (!dpbp_node) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPBP Device\n"); -+ return -1; -+ } -+ -+ /* Open the dpbp object */ -+ dpbp_node->dpbp.regs = mcp_ptr_list[MC_PORTAL_INDEX]; -+ ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Resource allocation failure with err code: %d", -+ ret); -+ free(dpbp_node); -+ return -1; -+ } -+ -+ /* Clean the device first */ -+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpbp device with" -+ "error code %d\n", ret); -+ return -1; -+ } -+ -+ dpbp_node->dpbp_id = dpbp_id; -+ /* Add the dpbp handle into the global list */ -+ dpbp_node->next = g_dpbp_list; -+ g_dpbp_list = dpbp_node; -+ avail_dpbp = g_dpbp_list; -+ -+ PMD_DRV_LOG(INFO, "Buffer resource initialized"); -+ -+ return 0; -+} -+ -+int hw_mbuf_create_pool(struct rte_mempool *mp) -+{ -+ struct dpaa2_bp_list *bp_list; -+ struct dpbp_attr dpbp_attr; -+ uint32_t bpid; -+ int ret; -+ -+ if (!avail_dpbp) { -+ PMD_DRV_LOG(ERR, "DPAA2 resources not available\n"); -+ return -1; -+ } -+ -+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Resource enable failure with" -+ "err code: 
%d\n", ret); -+ return -1; -+ } -+ -+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW, -+ avail_dpbp->token, &dpbp_attr); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Resource read failure with" -+ "err code: %d\n", ret); -+ ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, -+ avail_dpbp->token); -+ return -1; -+ } -+ -+ /* Allocate the bp_list which will be added into global_bp_list */ -+ bp_list = (struct dpaa2_bp_list *)malloc(sizeof(struct dpaa2_bp_list)); -+ if (!bp_list) { -+ PMD_DRV_LOG(ERR, "No heap memory available\n"); -+ return -1; -+ } -+ -+ /* Set parameters of buffer pool list */ -+ bp_list->buf_pool.num_bufs = mp->size; -+ bp_list->buf_pool.size = mp->elt_size -+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp); -+ bp_list->buf_pool.bpid = dpbp_attr.bpid; -+ bp_list->buf_pool.h_bpool_mem = NULL; -+ bp_list->buf_pool.mp = mp; -+ bp_list->buf_pool.dpbp_node = avail_dpbp; -+ bp_list->next = h_bp_list; -+ -+ bpid = dpbp_attr.bpid; -+ -+ /* Increment the available DPBP */ -+ avail_dpbp = avail_dpbp->next; -+ -+ bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf) -+ + rte_pktmbuf_priv_size(mp); -+ bpid_info[bpid].bp_list = bp_list; -+ bpid_info[bpid].bpid = bpid; -+ -+ mp->pool_data = (void *)&bpid_info[bpid]; -+ -+ PMD_DRV_LOG(INFO, "BP List created for bpid =%d\n", dpbp_attr.bpid); -+ -+ h_bp_list = bp_list; -+ /* TODO: Replace with mp->pool_data->flags after creating appropriate -+ * pool_data structure -+ */ -+ mp->flags |= MEMPOOL_F_HW_PKT_POOL; -+ return 0; -+} -+ -+void hw_mbuf_free_pool(struct rte_mempool *mp __rte_unused) -+{ -+ /* TODO: -+ * 1. Release bp_list memory allocation -+ * 2. opposite of dpbp_enable() -+ * -+ */ -+ struct dpaa2_bp_list *bp; -+ -+ /* Iterate over h_bp_list linked list and release each element */ -+ while (h_bp_list) { -+ bp = h_bp_list; -+ h_bp_list = bp->next; -+ -+ /* TODO: Should be changed to rte_free */ -+ free(bp); -+ } -+ -+ PMD_DRV_LOG(DEBUG, "(%s) called\n", __func__); -+ return; -+} -+ -+static inline void dpaa2_mbuf_release(uint64_t buf, uint32_t bpid) -+{ -+ struct qbman_release_desc releasedesc; -+ struct qbman_swp *swp; -+ int ret; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal"); -+ return; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ /* Create a release descriptor required for releasing -+ * buffers into BMAN */ -+ qbman_release_desc_clear(&releasedesc); -+ qbman_release_desc_set_bpid(&releasedesc, bpid); -+ -+ do { -+ /* Release buffer into the BMAN */ -+ ret = qbman_swp_release(swp, &releasedesc, &buf, 1); -+ } while (ret == -EBUSY); -+ PMD_TX_FREE_LOG(DEBUG, "Released %p address to BMAN\n", buf); -+} -+ -+int hw_mbuf_alloc_bulk(struct rte_mempool *pool, -+ void **obj_table, unsigned count) -+{ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -+ static int alloc; -+#endif -+ struct qbman_swp *swp; -+ uint32_t mbuf_size; -+ uint16_t bpid; -+ uint64_t bufs[RTE_MEMPOOL_CACHE_MAX_SIZE + 1]; -+ int ret; -+ unsigned i, n = 0; -+ struct bp_info *bp_info; -+ -+ PMD_DRV_LOG_RAW(INFO, "%s/n", __func__); -+ bp_info = mempool_to_bpinfo(pool); -+ -+ if (!(bp_info->bp_list)) { -+ printf("\nDPAA2 buffer pool not configured\n"); -+ return -2; -+ } -+ -+ bpid = bp_info->bpid; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret != 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal"); -+ return -1; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ /* if number of buffers requested is 
less than 7 */ -+ if (count < DPAA2_MBUF_MAX_ACQ_REL) { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], count); -+ if (ret <= 0) { -+ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret); -+ return -1; -+ } -+ n = ret; -+ goto set_buf; -+ } -+ -+ while (n < count) { -+ ret = 0; -+ /* Acquire is all-or-nothing, so we drain in 7s, -+ * then the remainder. -+ */ -+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], -+ DPAA2_MBUF_MAX_ACQ_REL); -+ if (ret == DPAA2_MBUF_MAX_ACQ_REL) { -+ n += ret; -+ } -+ } else { -+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], count - n); -+ if (ret > 0) { -+ PMD_DRV_LOG(DEBUG, "Drained buffer: %x", -+ bufs[n]); -+ n += ret; -+ } -+ } -+ /* In case of less than requested number of buffers available -+ * in pool, qbman_swp_acquire returns 0 -+ */ -+ if (ret <= 0) { -+ PMD_DRV_LOG(WARNING, "Buffer aquire failed with" -+ "err code: %d", ret); -+ break; -+ } -+ } -+ -+ /* This function either returns expected buffers or error */ -+ if (count != n) { -+ i = 0; -+ /* Releasing all buffers allocated */ -+ while (i < n) { -+ dpaa2_mbuf_release(bufs[i], bpid); -+ i++; -+ } -+ return -1; -+ } -+ -+ if (ret < 0 || n == 0) { -+ PMD_DRV_LOG_RAW(ERR, "Failed to allocate buffers %d", ret); -+ return -1; -+ } -+set_buf: -+ -+ mbuf_size = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(pool); -+ -+ for (i = 0; i < n; i++) { -+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t); -+ obj_table[i] = (struct rte_mbuf *)(bufs[i] - mbuf_size); -+ PMD_DRV_LOG(DEBUG, "Acquired %p address %p from BMAN\n", -+ (void *)bufs[i], (void *)obj_table[i]); -+ } -+ -+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER -+ alloc += n; -+ PMD_DRV_LOG_RAW(INFO, "Total = %d , req = %d done = %d", -+ alloc, count, n); -+#endif -+ return 0; -+} -+ -+int hw_mbuf_free_bulk(struct rte_mempool *pool, void * const *obj_table, -+ unsigned n) -+{ -+ unsigned i; -+ struct bp_info *bp_info; -+ -+ PMD_DRV_LOG_RAW(INFO, "%s/n", __func__); -+ -+ bp_info = mempool_to_bpinfo(pool); -+ if (!(bp_info->bp_list)) { -+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n"); -+ return -1; -+ } -+ /* TODO - optimize it */ -+ for (i = 0; i < n; i++) { -+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -+ dpaa2_mbuf_release( -+ (uint64_t)rte_mempool_virt2phy(pool, obj_table[i]) -+ + bp_info->meta_data_size, bp_info->bpid); -+#else -+ dpaa2_mbuf_release((uint64_t)obj_table[i] -+ + bp_info->meta_data_size, bp_info->bpid); -+#endif -+ -+ } -+ -+ return 0; -+} -+ -+unsigned hw_mbuf_get_count(const struct rte_mempool *mp __rte_unused) -+{ -+ /* TODO: incomplete */ -+ return 0; -+} -+ -+struct rte_mempool_ops dpaa2_mpool_ops = { -+ .name = "dpaa2", -+ .alloc = hw_mbuf_create_pool, -+ .free = hw_mbuf_free_pool, -+ .enqueue = hw_mbuf_free_bulk, -+ .dequeue = hw_mbuf_alloc_bulk, -+ .get_count = hw_mbuf_get_count, -+}; -+ -+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops); -diff --git a/drivers/net/dpaa2/rte_eth_dpio.c b/drivers/net/dpaa2/rte_eth_dpio.c -new file mode 100644 -index 0000000..2d06923 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpio.c -@@ -0,0 +1,336 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "rte_memzone.h" -+#include -+ -+#include "rte_eth_dpaa2_pvt.h" -+#include "fsl_qbman_portal.h" -+#include -+ -+#include -+#include "dpaa2_logs.h" -+ -+#define NUM_HOST_CPUS RTE_MAX_LCORE -+ -+__thread struct thread_io_info_t thread_io_info; -+ -+TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev); -+static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */ -+static uint32_t io_space_count; -+ -+/*Stashing Macros*/ -+#define DPAA2_CORE_CLUSTER_BASE 0x04 -+#define DPAA2_CORE_CLUSTER_FIRST (DPAA2_CORE_CLUSTER_BASE + 0) -+#define DPAA2_CORE_CLUSTER_SECOND (DPAA2_CORE_CLUSTER_BASE + 1) -+#define DPAA2_CORE_CLUSTER_THIRD (DPAA2_CORE_CLUSTER_BASE + 2) -+#define DPAA2_CORE_CLUSTER_FOURTH (DPAA2_CORE_CLUSTER_BASE + 3) -+ -+#define DPAA2_CORE_CLUSTER_GET(sdest, cpu_id) \ -+do { \ -+ if (cpu_id == 0 || cpu_id == 1) \ -+ sdest = DPAA2_CORE_CLUSTER_FIRST; \ -+ else if (cpu_id == 2 || cpu_id == 3) \ -+ sdest = DPAA2_CORE_CLUSTER_SECOND; \ -+ else if (cpu_id == 4 || cpu_id == 5) \ -+ sdest = DPAA2_CORE_CLUSTER_THIRD; \ -+ else \ -+ sdest = DPAA2_CORE_CLUSTER_FOURTH; \ -+} while (0) -+ -+static int -+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev) -+{ -+ struct qbman_swp_desc p_des; -+ struct dpio_attr attr; -+ -+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io)); -+ if (!dpio_dev->dpio) { -+ PMD_DRV_LOG(ERR, "Memory allocation failure\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Alocated DPIO[%p]", dpio_dev->dpio); -+ dpio_dev->dpio->regs = dpio_dev->mc_portal; -+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id, -+ &dpio_dev->token)) { -+ PMD_DRV_LOG(ERR, "Failed to allocate IO space\n"); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { -+ PMD_DRV_LOG(ERR, "Failed to reset dpio\n"); -+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ free(dpio_dev->dpio); -+ return -1; -+ } -+ -+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { -+ PMD_DRV_LOG(ERR, "Failed to Enable dpio\n"); -+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); -+ free(dpio_dev->dpio); -+ return 
-1;
-+ }
-+
-+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
-+ dpio_dev->token, &attr)) {
-+ PMD_DRV_LOG(ERR, "DPIO Get attribute failed\n");
-+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "Qbman Portal ID %d", attr.qbman_portal_id);
-+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%lX", attr.qbman_portal_ce_offset);
-+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%lX", attr.qbman_portal_ci_offset);
-+
-+ /* Configure & setup SW portal */
-+ p_des.block = NULL;
-+ p_des.idx = attr.qbman_portal_id;
-+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
-+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
-+ p_des.irq = -1;
-+ p_des.qman_version = attr.qbman_version;
-+
-+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%p", p_des.cena_bar);
-+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%p", p_des.cinh_bar);
-+
-+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
-+ if (dpio_dev->sw_portal == NULL) {
-+ PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
-+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
-+
-+ return 0;
-+}
-+
-+int dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
-+{
-+ int sdest;
-+ int cpu_id, ret;
-+
-+ /* Set the Stashing Destination */
-+ cpu_id = rte_lcore_id();
-+ if (cpu_id < 0) {
-+ cpu_id = rte_get_master_lcore();
-+ if (cpu_id < 0) {
-+ PMD_DRV_LOG(ERR, "\tGetting CPU Index failed\n");
-+ return -1;
-+ }
-+ }
-+
-+ /*
-+ * When DPDK runs on a Virtual Machine, the Stashing Destination gets
-+ * set in the H/W w.r.t. the Virtual CPU IDs. As a workaround, the
-+ * environment variable HOST_START_CPU tells the offset of the host
-+ * start core of the Virtual Machine threads.
-+ */
-+ if (getenv("HOST_START_CPU")) {
-+ cpu_id +=
-+ atoi(getenv("HOST_START_CPU"));
-+ cpu_id = cpu_id % NUM_HOST_CPUS;
-+ }
-+
-+ /* Set the STASH Destination depending on the current CPU ID.
-+ Valid values of SDEST are 4, 5, 6 and 7, where
-+ CPU 0-1 will have SDEST 4,
-+ CPU 2-3 will have SDEST 5, and so on.
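-+ For cpu_id 0..7 this is equivalent to
-+ sdest = DPAA2_CORE_CLUSTER_BASE + (cpu_id / 2);
-+ any higher cpu_id maps to DPAA2_CORE_CLUSTER_FOURTH, as implemented by
-+ the DPAA2_CORE_CLUSTER_GET() macro above.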
-+ */ -+ DPAA2_CORE_CLUSTER_GET(sdest, cpu_id); -+ PMD_DRV_LOG(INFO, "Portal= %d CPU= %u SDEST= %d\n", -+ dpio_dev->index, cpu_id, sdest); -+ -+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW, -+ dpio_dev->token, sdest); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void) -+{ -+ struct dpaa2_dpio_dev *dpio_dev = NULL; -+ int ret; -+ -+ /* Get DPIO dev handle from list using index */ -+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) { -+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count)) -+ break; -+ } -+ if (!dpio_dev) -+ return NULL; -+ -+ ret = dpaa2_configure_stashing(dpio_dev); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed"); -+ } -+ return dpio_dev; -+} -+int -+dpaa2_affine_qbman_swp(void) -+{ -+ if (thread_io_info.dpio_dev) -+ return 0; -+ -+ /* Populate the thread_io_info structure */ -+ thread_io_info.dpio_dev = dpaa2_get_qbman_swp(); -+ if (thread_io_info.dpio_dev) -+ return 0; -+ else -+ return -1; -+} -+ -+int -+dpaa2_affine_qbman_swp_sec(void) -+{ -+ if (thread_io_info.sec_dpio_dev) -+ return 0; -+ -+ /* Populate the thread_io_info structure */ -+ thread_io_info.sec_dpio_dev = dpaa2_get_qbman_swp(); -+ if (thread_io_info.sec_dpio_dev) -+ return 0; -+ else -+ return -1; -+} -+ -+int -+dpaa2_create_dpio_device(struct fsl_vfio_device *vdev, -+ struct vfio_device_info *obj_info, -+ int object_id) -+{ -+ struct dpaa2_dpio_dev *dpio_dev; -+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; -+ -+ if (obj_info->num_regions < NUM_DPIO_REGIONS) { -+ PMD_DRV_LOG(ERR, "ERROR, Not sufficient number " -+ "of DPIO regions.\n"); -+ return -1; -+ } -+ -+ if (!dpio_dev_list) { -+ dpio_dev_list = malloc(sizeof(struct dpio_device_list)); -+ if (NULL == dpio_dev_list) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO list\n"); -+ return -1; -+ } -+ -+ /* Initialize the DPIO List */ -+ TAILQ_INIT(dpio_dev_list); -+ } -+ -+ dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev)); -+ if (!dpio_dev) { -+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO Device\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]", dpio_dev); -+ dpio_dev->dpio = NULL; -+ dpio_dev->hw_id = object_id; -+ dpio_dev->vfio_fd = vdev->fd; -+ rte_atomic16_init(&dpio_dev->ref_count); -+ /* Using single portal for all devices */ -+ dpio_dev->mc_portal = mcp_ptr_list[MC_PORTAL_INDEX]; -+ -+ reg_info.index = 0; -+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { -+ printf("vfio: error getting region info\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx", reg_info.offset); -+ PMD_DRV_LOG(INFO, "\t Region Size = %llx", reg_info.size); -+ dpio_dev->ce_size = reg_info.size; -+ dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ dpio_dev->vfio_fd, reg_info.offset); -+ -+ /* Create Mapping for QBMan Cache Enabled area. This is a fix for -+ SMMU fault for DQRR statshing transaction. 
*/ -+ if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr, -+ reg_info.offset, reg_info.size)) { -+ PMD_DRV_LOG(ERR, "DMAMAP for Portal CE area failed.\n"); -+ return -1; -+ } -+ -+ reg_info.index = 1; -+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { -+ printf("vfio: error getting region info\n"); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx", reg_info.offset); -+ PMD_DRV_LOG(INFO, "\t Region Size = %llx", reg_info.size); -+ dpio_dev->ci_size = reg_info.size; -+ dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ dpio_dev->vfio_fd, reg_info.offset); -+ -+ if (configure_dpio_qbman_swp(dpio_dev)) { -+ PMD_DRV_LOG(ERR, -+ "Failed in configuring the qbman portal for dpio %d\n", -+ dpio_dev->hw_id); -+ return -1; -+ } -+ -+ io_space_count++; -+ dpio_dev->index = io_space_count; -+ TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next); -+ -+ return 0; -+} -diff --git a/drivers/net/dpaa2/rte_eth_dpni.c b/drivers/net/dpaa2/rte_eth_dpni.c -new file mode 100644 -index 0000000..c1587dc ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpni.c -@@ -0,0 +1,2269 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+/* MC header files */ -+#include -+#include -+#include "rte_eth_dpaa2_pvt.h" -+#include "rte_eth_dpni_annot.h" -+#include "dpaa2_logs.h" -+ -+#include -+#include -+ -+#define DPAA2_STASHING -+ -+/* tx fd send batching */ -+#define QBMAN_MULTI_TX -+/* #define DPAA2_CGR_SUPPORT */ -+ -+ -+#define DPAA2_MIN_RX_BUF_SIZE 512 -+#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/ -+ -+#define RTE_ETH_DPAA2_SNAPSHOT_LEN 65535 -+#define RTE_ETH_DPAA2_SNAPLEN 4096 -+#define RTE_ETH_DPAA2_PROMISC 1 -+#define RTE_ETH_DPAA2_TIMEOUT -1 -+#define ETH_DPAA2_RX_IFACE_ARG "rx_iface" -+#define ETH_DPAA2_TX_IFACE_ARG "tx_iface" -+#define ETH_DPAA2_IFACE_ARG "iface" -+ -+static const char *drivername = "DPNI PMD"; -+ -+#define MAX_TCS DPNI_MAX_TC -+#define MAX_RX_QUEUES 64 -+#define MAX_TX_QUEUES 64 -+ -+/*Maximum number of slots available in TX ring*/ -+#define MAX_SLOTS 8 -+ -+/*Threshold for a queue to *Enter* Congestion state. -+ It is set to 128 frames of size 64 bytes.*/ -+#define CONG_ENTER_THRESHOLD (128 * 64) -+ -+/*Threshold for a queue to *Exit* Congestion state. -+ It is set to 98 frames of size 64 bytes*/ -+#define CONG_EXIT_THRESHOLD (98 * 64) -+ -+/*! Maximum number of flow distributions per traffic class */ -+#define MAX_DIST_PER_TC 16 -+ -+/* Size of the input SMMU mapped memory required by MC */ -+#define DIST_PARAM_IOVA_SIZE 256 -+ -+struct dpaa2_queue { -+ void *dev; -+ int32_t eventfd; /*!< Event Fd of this queue */ -+ uint32_t fqid; /*!< Unique ID of this queue */ -+ uint8_t tc_index; /*!< traffic class identifier */ -+ uint16_t flow_id; /*!< To be used by DPAA2 frmework */ -+ uint64_t rx_pkts; -+ uint64_t tx_pkts; -+ uint64_t err_pkts; -+ union { -+ struct queue_storage_info_t *q_storage; -+ struct qbman_result *cscn; -+ }; -+}; -+ -+struct dpaa2_dev_priv { -+ void *hw; -+ int32_t hw_id; -+ int32_t qdid; -+ uint16_t token; -+ uint8_t nb_tx_queues; -+ uint8_t nb_rx_queues; -+ void *rx_vq[MAX_RX_QUEUES]; -+ void *tx_vq[MAX_TX_QUEUES]; -+ -+ struct dpaa2_bp_list *bp_list; /**data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ uint64_t value; -+ -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME, &value); -+ printf("Rx packets: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_BYTE, &value); -+ printf("Rx bytes: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_MCAST_FRAME, &value); -+ printf("Rx Multicast: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DROP, &value); -+ printf("Rx dropped: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DISCARD, &value); -+ printf("Rx discarded: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME, &value); -+ printf("Tx packets: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_BYTE, &value); -+ printf("Tx bytes: %ld\n", value); -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME_DISCARD, &value); -+ printf("Tx dropped: %ld\n", value); -+} -+ -+/** -+ * Atomically reads the link status information from global -+ * structure rte_eth_dev. -+ * -+ * @param dev -+ * - Pointer to the structure rte_eth_dev to read from. -+ * - Pointer to the buffer to be saved with the link status. -+ * -+ * @return -+ * - On success, zero. -+ * - On failure, negative value. 
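-+ *
-+ * Illustrative usage sketch (hypothetical caller code, not part of this
-+ * driver):
-+ *
-+ *   struct rte_eth_link link;
-+ *
-+ *   memset(&link, 0, sizeof(link));
-+ *   if (rte_dpni_dev_atomic_read_link_status(dev, &link) == 0 &&
-+ *       link.link_status)
-+ *           PMD_DRV_LOG(INFO, "link up, speed %u Mbps",
-+ *                       (unsigned)link.link_speed);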
-+ */ -+static inline int -+rte_dpni_dev_atomic_read_link_status(struct rte_eth_dev *dev, -+ struct rte_eth_link *link) -+{ -+ struct rte_eth_link *dst = link; -+ struct rte_eth_link *src = &dev->data->dev_link; -+ -+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, -+ *(uint64_t *)src) == 0) -+ return -1; -+ -+ return 0; -+} -+ -+/** -+ * Atomically writes the link status information into global -+ * structure rte_eth_dev. -+ * -+ * @param dev -+ * - Pointer to the structure rte_eth_dev to read from. -+ * - Pointer to the buffer to be saved with the link status. -+ * -+ * @return -+ * - On success, zero. -+ * - On failure, negative value. -+ */ -+static inline int -+rte_dpni_dev_atomic_write_link_status(struct rte_eth_dev *dev, -+ struct rte_eth_link *link) -+{ -+ struct rte_eth_link *dst = &dev->data->dev_link; -+ struct rte_eth_link *src = link; -+ -+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, -+ *(uint64_t *)src) == 0) -+ return -1; -+ -+ return 0; -+} -+ -+static inline void -+dpaa2_eth_parse_packet(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) -+{ -+ uint32_t pkt_type = 0; -+ struct pkt_annotation *annotation = -+ (struct pkt_annotation *)hw_annot_addr; -+ -+ PMD_DRV_LOG(DEBUG, "\n 1 annotation = 0x%lx ", annotation->word4); -+ -+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L2_ETHER; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT)) -+ pkt_type /* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV6; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4_EXT; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_UDP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_TCP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_SCTP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_ICMP; -+ -+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) -+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_UNKNOWN; -+ -+ mbuf->packet_type = pkt_type; -+} -+ -+static inline -+struct rte_mbuf *eth_fd_to_mbuf(const struct qbman_fd *fd) -+{ -+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( -+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); -+ /* need to repopulated some of the fields, -+ as they may have changed in last transmission*/ -+ -+ -+ mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); -+ mbuf->data_len = DPAA2_GET_FD_LEN(fd); -+ mbuf->pkt_len = mbuf->data_len; -+ mbuf->next = NULL; -+ rte_mbuf_refcnt_set(mbuf, 1); -+ -+ PMD_DRV_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," -+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", -+ mbuf, mbuf->buf_addr, mbuf->data_off, -+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); -+ -+ /* Parse the packet */ -+ /* parse results are after the private - sw annotation area */ -+ dpaa2_eth_parse_packet(mbuf, -+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) -+ + DPAA2_FD_PTA_SIZE); -+ -+ mbuf->nb_segs = 1; -+ mbuf->ol_flags = 0; -+ 
-+ return mbuf; -+} -+ -+static void __attribute__ ((noinline)) eth_mbuf_to_fd(struct rte_mbuf *mbuf, -+ struct qbman_fd *fd, uint16_t bpid) -+{ -+ /*Resetting the buffer pool id and offset field*/ -+ fd->simple.bpid_offset = 0; -+ -+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); -+ DPAA2_SET_FD_LEN(fd, mbuf->data_len); -+ DPAA2_SET_FD_BPID(fd, bpid); -+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off); -+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); -+ -+ PMD_DRV_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d," -+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", -+ mbuf, mbuf->buf_addr, mbuf->data_off, -+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); -+ -+ return; -+} -+ -+static int eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, -+ struct qbman_fd *fd, uint16_t bpid) -+{ -+ struct rte_mbuf *m; -+ void *mb = NULL; -+ -+ if (hw_mbuf_alloc_bulk(bpid_info[bpid].bp_list->buf_pool.mp, &mb, 1)) { -+ PMD_DRV_LOG(WARNING, "Unable to allocated DPAA2 buffer"); -+ rte_pktmbuf_free(mbuf); -+ return -1; -+ } -+ m = (struct rte_mbuf *)mb; -+ memcpy((char *)m->buf_addr + mbuf->data_off, -+ (void *)((char *)mbuf->buf_addr + mbuf->data_off), -+ mbuf->pkt_len); -+ -+ /*Resetting the buffer pool id and offset field*/ -+ fd->simple.bpid_offset = 0; -+ -+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m)); -+ DPAA2_SET_FD_LEN(fd, mbuf->data_len); -+ DPAA2_SET_FD_BPID(fd, bpid); -+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off); -+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); -+ -+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p", -+ (void *)mbuf, mbuf->buf_addr); -+ -+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n", -+ DPAA2_GET_FD_ADDR(fd), -+ DPAA2_GET_FD_BPID(fd), -+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, -+ DPAA2_GET_FD_OFFSET(fd), -+ DPAA2_GET_FD_LEN(fd)); -+ /*free the original packet */ -+ rte_pktmbuf_free(mbuf); -+ -+ return 0; -+} -+ -+static uint16_t -+eth_dpaa2_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function is responsible to receive frames for a given device and VQ*/ -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_result *dq_storage; -+ uint32_t fqid = dpaa2_q->fqid; -+ int ret, num_rx = 0; -+ uint8_t is_last = 0, status; -+ struct qbman_swp *swp; -+ const struct qbman_fd *fd; -+ struct qbman_pull_desc pulldesc; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ dq_storage = dpaa2_q->q_storage->dq_storage[0]; -+ -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ /* todo optimization - we can have dq_storage_phys available*/ -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ -+ /*Issue a volatile dequeue command. */ -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(ERR, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ /* Portal was busy, try again */ -+ continue; -+ } -+ break; -+ }; -+ -+ /* Receive the packets till Last Dequeue entry is found with -+ respect to the above issues PULL command. -+ */ -+ while (!is_last) { -+ /*Check if the previous issued command is completed. 
-+ *Also seems like the SWP is shared between the Ethernet Driver -+ *and the SEC driver.*/ -+ while (!qbman_check_command_complete(swp, dq_storage)) -+ ; -+ /* Loop until the dq_storage is updated with -+ * new token by QBMAN */ -+ while (!qbman_result_has_new_result(swp, dq_storage)) -+ ; -+ /* Check whether Last Pull command is Expired and -+ setting Condition for Loop termination */ -+ if (qbman_result_DQ_is_pull_complete(dq_storage)) { -+ is_last = 1; -+ /* Check for valid frame. */ -+ status = (uint8_t)qbman_result_DQ_flags(dq_storage); -+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { -+ PMD_DRV_LOG(DEBUG, "No frame is delivered\n"); -+ continue; -+ } -+ } -+ -+ fd = qbman_result_DQ_fd(dq_storage); -+ bufs[num_rx] = eth_fd_to_mbuf(fd); -+ bufs[num_rx]->port = dev->data->port_id; -+ -+ num_rx++; -+ dq_storage++; -+ } /* End of Packet Rx loop */ -+ -+ dpaa2_q->rx_pkts += num_rx; -+ -+ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx); -+ /*Return the total number of packets received to DPAA2 app*/ -+ return num_rx; -+} -+ -+static uint16_t -+eth_dpaa2_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function is responsible to receive frames for a given device and VQ*/ -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_result *dq_storage; -+ uint32_t fqid = dpaa2_q->fqid; -+ int ret, i, num_rx = 0; -+ uint8_t is_last = 0, status; -+ struct qbman_swp *swp; -+ const struct qbman_fd *fd[16]; -+ struct qbman_pull_desc pulldesc; -+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ if (!q_storage->active_dqs) { -+ q_storage->toggle = 0; -+ dq_storage = q_storage->dq_storage[q_storage->toggle]; -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ if (thread_io_info.global_active_dqs) { -+ while (!qbman_check_command_complete(swp, thread_io_info.global_active_dqs)) -+ ; -+ } -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(WARNING, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ /* Portal was busy, try again */ -+ continue; -+ } -+ break; -+ } -+ q_storage->active_dqs = dq_storage; -+ thread_io_info.global_active_dqs = dq_storage; -+ } -+ if (thread_io_info.global_active_dqs) -+ while (!qbman_check_command_complete(swp, thread_io_info.global_active_dqs)) -+ ; -+ dq_storage = q_storage->active_dqs; -+ while (!is_last) { -+ /* Loop until the dq_storage is updated with -+ * new token by QBMAN */ -+ struct rte_mbuf *mbuf; -+ -+ while (!qbman_result_has_new_result(swp, dq_storage)) -+ ; -+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1))); -+ /* Check whether Last Pull command is Expired and -+ setting Condition for Loop termination */ -+ if (qbman_result_DQ_is_pull_complete(dq_storage)) { -+ is_last = 1; -+ /* Check for valid frame. 
*/ -+ status = (uint8_t)qbman_result_DQ_flags(dq_storage); -+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { -+ PMD_DRV_LOG2(DEBUG, "No frame is delivered\n"); -+ continue; -+ } -+ } -+ fd[num_rx] = qbman_result_DQ_fd(dq_storage); -+ mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR( -+ DPAA2_GET_FD_ADDR(fd[num_rx]) -+ - bpid_info[DPAA2_GET_FD_BPID(fd[num_rx])].meta_data_size); -+ /* Prefeth mbuf */ -+ rte_prefetch0(mbuf); -+ /* Prefetch Annotation address from where we get parse results */ -+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_FD_PTA_SIZE + 16)); -+ /*Prefetch Data buffer*/ -+ /* rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_GET_FD_OFFSET(fd[num_rx]))); */ -+ dq_storage++; -+ num_rx++; -+ -+ } /* End of Packet Rx loop */ -+ -+ for (i = 0; i < num_rx; i++) { -+ bufs[i] = eth_fd_to_mbuf(fd[i]); -+ bufs[i]->port = dev->data->port_id; -+ } -+ -+ q_storage->toggle ^= 1; -+ dq_storage = q_storage->dq_storage[q_storage->toggle]; -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts); -+ qbman_pull_desc_set_fq(&pulldesc, fqid); -+ qbman_pull_desc_set_storage(&pulldesc, dq_storage, -+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); -+ /*Issue a volatile dequeue command. */ -+ -+ while (1) { -+ if (qbman_swp_pull(swp, &pulldesc)) { -+ PMD_DRV_LOG(WARNING, "VDQ command is not issued." -+ "QBMAN is busy\n"); -+ continue; -+ } -+ break; -+ } -+ q_storage->active_dqs = dq_storage; -+ thread_io_info.global_active_dqs = dq_storage; -+ -+ dpaa2_q->rx_pkts += num_rx; -+ -+ PMD_DRV_LOG2(INFO, "Ethernet Received %d Packets\n", num_rx); -+ /*Return the total number of packets received to DPAA2 app*/ -+ return num_rx; -+} -+ -+/* -+ * Callback to handle sending packets through a real NIC. -+ */ -+static uint16_t -+eth_dpaa2_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) -+{ -+ /* Function to transmit the frames to given device and VQ*/ -+ uint32_t loop; -+ int32_t ret; -+#ifdef QBMAN_MULTI_TX -+ struct qbman_fd fd_arr[8]; -+ uint32_t frames_to_send; -+#else -+ struct qbman_fd fd; -+#endif -+ struct rte_mempool *mp; -+ struct qbman_eq_desc eqdesc; -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; -+ struct qbman_swp *swp; -+ uint16_t num_tx = 0; -+ uint16_t bpid; -+ struct rte_eth_dev *dev = dpaa2_q->dev; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ -+ if (!thread_io_info.dpio_dev) { -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in affining portal\n"); -+ return 0; -+ } -+ } -+ swp = thread_io_info.dpio_dev->sw_portal; -+ -+ PMD_DRV_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid); -+ -+ /*Prepare enqueue descriptor*/ -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); -+ qbman_eq_desc_set_response(&eqdesc, 0, 0); -+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid, -+ dpaa2_q->flow_id, dpaa2_q->tc_index); -+ -+ /*Clear the unused FD fields before sending*/ -+#ifdef QBMAN_MULTI_TX -+ while (nb_pkts) { -+#ifdef DPAA2_CGR_SUPPORT -+ /*Check if the queue is congested*/ -+ if (qbman_result_is_CSCN(dpaa2_q->cscn)) -+ goto skip_tx; -+#endif -+ frames_to_send = (nb_pkts >> 3) ? 
MAX_SLOTS : nb_pkts; -+ -+ for (loop = 0; loop < frames_to_send; loop++) { -+ fd_arr[loop].simple.frc = 0; -+ DPAA2_RESET_FD_CTRL((&fd_arr[loop])); -+ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL); -+ mp = (*bufs)->pool; -+ /* Not a hw_pkt pool allocated frame */ -+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) { -+ printf("\n non hw offload bufffer "); -+ /* alloc should be from the default buffer pool -+ attached to this interface */ -+ if (priv->bp_list) { -+ bpid = priv->bp_list->buf_pool.bpid; -+ } else { -+ printf("\n ??? why no bpool attached"); -+ num_tx = 0; -+ goto skip_tx; -+ } -+ if (eth_copy_mbuf_to_fd(*bufs, &fd_arr[loop], bpid)) { -+ bufs++; -+ continue; -+ } -+ } else { -+ RTE_ASSERT(mp); -+ bpid = mempool_to_bpid(mp); -+ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid); -+ } -+ bufs++; -+ } -+ loop = 0; -+ while (loop < frames_to_send) { -+ loop += qbman_swp_send_multiple(swp, &eqdesc, -+ &fd_arr[loop], frames_to_send - loop); -+ } -+ -+ num_tx += frames_to_send; -+ dpaa2_q->tx_pkts += frames_to_send; -+ nb_pkts -= frames_to_send; -+ } -+#else -+#ifdef DPAA2_CGR_SUPPORT -+ /*Check if the queue is congested*/ -+ if(qbman_result_is_CSCN(dpaa2_q->cscn)) -+ goto skip_tx; -+#endif -+ -+ fd.simple.frc = 0; -+ DPAA2_RESET_FD_CTRL((&fd)); -+ DPAA2_SET_FD_FLC((&fd), NULL); -+ loop = 0; -+ -+ while (loop < nb_pkts) { -+ /*Prepare each packet which is to be sent*/ -+ mp = bufs[loop]->pool; -+ /* Not a hw_pkt pool allocated frame */ -+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) { -+ /* alloc should be from the default buffer pool -+ attached to this interface */ -+ if (priv->bp_list) { -+ bpid = priv->bp_list->buf_pool.bpid; -+ } else { -+ /* Buffer not from offloaded area as well as -+ * lacks buffer pool identifier. Cannot -+ * continue. -+ */ -+ PMD_DRV_LOG(ERR, "No Buffer pool " -+ "attached.\n"); -+ num_tx = 0; -+ goto skip_tx; -+ } -+ -+ if (eth_copy_mbuf_to_fd(bufs[loop], &fd, bpid)) { -+ loop++; -+ continue; -+ } -+ } else { -+ RTE_ASSERT(mp); -+ bpid = mempool_to_bpid(mp); -+ eth_mbuf_to_fd(bufs[loop], &fd, bpid); -+ } -+ /*Enqueue a single packet to the QBMAN*/ -+ do { -+ ret = qbman_swp_enqueue(swp, &eqdesc, &fd); -+ if (ret != 0) { -+ PMD_DRV_LOG(DEBUG, "Error in transmiting the frame\n"); -+ } -+ } while (ret != 0); -+ -+ /* Free the buffer shell */ -+ /* rte_pktmbuf_free(bufs[loop]); */ -+ num_tx++; loop++; -+ } -+ dpaa2_q->tx_pkts += num_tx; -+ dpaa2_q->err_pkts += nb_pkts - num_tx; -+#endif -+skip_tx: -+ return num_tx; -+} -+ -+static int -+dpaa2_vlan_stripping_set(struct rte_eth_dev *dev, int on) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -1; -+ } -+ -+ ret = dpni_set_vlan_removal(dpni, CMD_PRI_LOW, priv->token, on); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to dpni_set_vlan_removal hwid =%d", -+ priv->hw_id); -+ return ret; -+} -+ -+static int -+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -1; -+ } -+ -+ if (on) -+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); -+ else -+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); -+ -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", -+ ret, 
vlan_id, priv->hw_id); -+ -+ /*todo this should on global basis */ -+/* ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, on); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to set vlan filter"); -+*/ return ret; -+} -+ -+static void -+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ -+ if (mask & ETH_VLAN_FILTER_MASK) { -+ if (dev->data->dev_conf.rxmode.hw_vlan_filter) -+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, TRUE); -+ else -+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, FALSE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "ret = %d Unable to set vlan filter", ret); -+ } -+ -+ if (mask & ETH_VLAN_STRIP_MASK) { -+ /* Enable or disable VLAN stripping */ -+ if (dev->data->dev_conf.rxmode.hw_vlan_strip) -+ dpaa2_vlan_stripping_set(dev, TRUE); -+ else -+ dpaa2_vlan_stripping_set(dev, FALSE); -+ } -+ -+ if (mask & ETH_VLAN_EXTEND_MASK) { -+ PMD_INIT_FUNC_TRACE(); -+/* if (dev->data->dev_conf.rxmode.hw_vlan_extend) -+ i40e_vsi_config_double_vlan(vsi, TRUE); -+ else -+ i40e_vsi_config_double_vlan(vsi, FALSE); -+*/ } -+} -+ -+static void -+dpaa2_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ -+ dev_info->driver_name = drivername; -+ dev_info->if_index = priv->hw_id; -+ dev_info->max_mac_addrs = priv->max_unicast_filters; -+ dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; -+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; -+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; -+ dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; -+ dev_info->pci_dev = dev->pci_dev; -+/* dev_info->rx_offload_capa = -+ DEV_RX_OFFLOAD_IPV4_CKSUM | -+ DEV_RX_OFFLOAD_UDP_CKSUM | -+ DEV_RX_OFFLOAD_TCP_CKSUM; -+ dev_info->tx_offload_capa = -+ DEV_TX_OFFLOAD_IPV4_CKSUM | -+ DEV_TX_OFFLOAD_UDP_CKSUM | -+ DEV_TX_OFFLOAD_TCP_CKSUM | -+ DEV_TX_OFFLOAD_SCTP_CKSUM; -+*/ -+} -+ -+static int -+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ uint8_t tc_idx; -+ uint16_t dist_idx; -+ uint32_t vq_id; -+ struct dpaa2_queue *mc_q, *mcq; -+ uint32_t tot_queues; -+ int i; -+ struct dpaa2_queue *dpaa2_q; -+ -+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; -+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, -+ RTE_CACHE_LINE_SIZE); -+ if (!mc_q) { -+ PMD_DRV_LOG(ERR, "malloc failed for rx/tx queues\n"); -+ return -1; -+ } -+ -+ for (i = 0; i < priv->nb_rx_queues; i++) { -+ mc_q->dev = dev; -+ priv->rx_vq[i] = mc_q++; -+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; -+ dpaa2_q->q_storage = rte_malloc("dq_storage", -+ sizeof(struct queue_storage_info_t), -+ RTE_CACHE_LINE_SIZE); -+ if (!dpaa2_q->q_storage) -+ goto fail; -+ -+ memset(dpaa2_q->q_storage, 0, sizeof(struct queue_storage_info_t)); -+ } -+ -+ for (i = 0; i < priv->nb_tx_queues; i++) { -+ mc_q->dev = dev; -+ mc_q->flow_id = DPNI_NEW_FLOW_ID; -+ priv->tx_vq[i] = mc_q++; -+ } -+ -+ vq_id = 0; -+ for (tc_idx = 0; tc_idx < priv->num_tc; tc_idx++) { -+ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[tc_idx]; dist_idx++) { -+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; -+ mcq->tc_index = tc_idx; -+ mcq->flow_id = dist_idx; -+ vq_id++; -+ } -+ } -+ -+ return 0; -+fail: -+ i -= 1; -+ while (i >= 0) { -+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; -+ rte_free(dpaa2_q->q_storage); -+ } -+ return -1; -+} -+ -+static 
void dpaa2_distset_to_dpkg_profile_cfg( -+ uint32_t req_dist_set, -+ struct dpkg_profile_cfg *kg_cfg) -+{ -+ uint32_t loop = 0, i = 0, dist_field = 0; -+ int l2_configured = 0, l3_configured = 0; -+ int l4_configured = 0, sctp_configured = 0; -+ -+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); -+ while (req_dist_set) { -+ if (req_dist_set % 2 != 0) { -+ dist_field = 1U << loop; -+ switch (dist_field) { -+ case ETH_RSS_L2_PAYLOAD: -+ -+ if (l2_configured) -+ break; -+ l2_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_ETH; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_ETH_TYPE; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ case ETH_RSS_IPV4: -+ case ETH_RSS_FRAG_IPV4: -+ case ETH_RSS_NONFRAG_IPV4_OTHER: -+ case ETH_RSS_IPV6: -+ case ETH_RSS_FRAG_IPV6: -+ case ETH_RSS_NONFRAG_IPV6_OTHER: -+ case ETH_RSS_IPV6_EX: -+ -+ if (l3_configured) -+ break; -+ l3_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_DST; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_IP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_IP_PROTO; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ kg_cfg->num_extracts++; -+ i++; -+ break; -+ -+ case ETH_RSS_NONFRAG_IPV4_TCP: -+ case ETH_RSS_NONFRAG_IPV6_TCP: -+ case ETH_RSS_NONFRAG_IPV4_UDP: -+ case ETH_RSS_NONFRAG_IPV6_UDP: -+ -+ if (l4_configured) -+ break; -+ l4_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_TCP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_TCP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_TCP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_TCP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ case ETH_RSS_NONFRAG_IPV4_SCTP: -+ case ETH_RSS_NONFRAG_IPV6_SCTP: -+ -+ if (sctp_configured) -+ break; -+ sctp_configured = 1; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_SCTP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_SCTP_PORT_SRC; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ -+ kg_cfg->extracts[i].extract.from_hdr.prot = -+ NET_PROT_SCTP; -+ kg_cfg->extracts[i].extract.from_hdr.field = -+ NH_FLD_SCTP_PORT_DST; -+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR; -+ kg_cfg->extracts[i].extract.from_hdr.type = -+ DPKG_FULL_FIELD; -+ i++; -+ break; -+ -+ default: -+ PMD_DRV_LOG(WARNING, "Bad flow distribution option %x\n", dist_field); -+ } -+ } -+ req_dist_set = req_dist_set >> 1; -+ loop++; -+ } -+ kg_cfg->num_extracts = i; -+} -+ -+static int dpaa2_setup_flow_distribution(struct rte_eth_dev *eth_dev, -+ uint32_t req_dist_set) 
-+{ -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_rx_tc_dist_cfg tc_cfg; -+ struct dpkg_profile_cfg kg_cfg; -+ void *p_params; -+ int ret, tc_index = 0; -+ -+ p_params = rte_malloc( -+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); -+ if (!p_params) { -+ PMD_DRV_LOG(ERR, "Memory unavaialble\n"); -+ return -ENOMEM; -+ } -+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE); -+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); -+ -+ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); -+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); -+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues; -+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ -+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n"); -+ rte_free(p_params); -+ return ret; -+ } -+ -+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, -+ &tc_cfg); -+ rte_free(p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with" -+ "err code: %d\n", ret); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+dpaa2_remove_flow_distribution(struct rte_eth_dev *eth_dev, uint8_t tc_index) -+{ -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_rx_tc_dist_cfg tc_cfg; -+ struct dpkg_profile_cfg kg_cfg; -+ void *p_params; -+ int ret; -+ -+ p_params = rte_malloc( -+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); -+ if (!p_params) { -+ PMD_DRV_LOG(ERR, "Memory unavaialble\n"); -+ return -ENOMEM; -+ } -+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE); -+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); -+ -+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); -+ tc_cfg.dist_size = 0; -+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE; -+ -+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n"); -+ rte_free(p_params); -+ return ret; -+ } -+ -+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, -+ &tc_cfg); -+ rte_free(p_params); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with" -+ "err code: %d\n", ret); -+ return ret; -+ } -+ return ret; -+} -+ -+static int -+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage) -+{ -+ int i = 0; -+ -+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) { -+ q_storage->dq_storage[i] = rte_malloc(NULL, -+ NUM_MAX_RECV_FRAMES * sizeof(struct qbman_result), -+ RTE_CACHE_LINE_SIZE); -+ if (!q_storage->dq_storage[i]) -+ goto fail; -+ /*setting toggle for initial condition*/ -+ q_storage->toggle = -1; -+ } -+ return 0; -+fail: -+ i -= 1; -+ while (i >= 0) -+ rte_free(q_storage->dq_storage[i]); -+ -+ return -1; -+} -+ -+static int -+dpaa2_eth_dev_configure(struct rte_eth_dev *dev) -+{ -+ struct rte_eth_dev_data *data = dev->data; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct rte_eth_conf *eth_conf = &data->dev_conf; -+ struct dpaa2_queue *dpaa2_q; -+ int i, ret; -+ -+ for (i = 0; i < data->nb_rx_queues; i++) { -+ data->rx_queues[i] = priv->rx_vq[i]; -+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; -+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) -+ return -1; -+ } -+ -+ for (i = 0; i < data->nb_tx_queues; i++) { -+ data->tx_queues[i] = priv->tx_vq[i]; -+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; -+ dpaa2_q->cscn = rte_malloc(NULL, sizeof(struct qbman_result), 16); -+ if (!dpaa2_q->cscn) -+ goto fail_tx_queue; -+ } -+ 
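dpaa2_distset_to_dpkg_profile_cfg() above turns the requested RSS field mask into key-generation extracts by walking the mask one bit at a time and adding each header group (L2, L3, L4, SCTP) only once. The sketch below shows just that dispatch skeleton; the RSS_* bit names and the extract_group tokens are simplified placeholders, not the real ETH_RSS_* values or dpkg_profile_cfg layout, and each group is collapsed to a single token instead of the driver's multiple field extracts.

/* Sketch of the bit-walking dispatch used to build the extract list. */
#include <stdint.h>
#include <stdio.h>

enum extract_group { EXT_L2, EXT_L3, EXT_L4, EXT_SCTP };

/* Hypothetical stand-ins for the ETH_RSS_* bits handled above. */
#define RSS_L2_PAYLOAD   (1u << 0)
#define RSS_IPV4         (1u << 1)
#define RSS_IPV6         (1u << 2)
#define RSS_TCP          (1u << 3)
#define RSS_UDP          (1u << 4)
#define RSS_SCTP         (1u << 5)

static int
build_extracts(uint32_t req_dist_set, enum extract_group *out, int max)
{
	int n = 0, l2 = 0, l3 = 0, l4 = 0, sctp = 0;
	uint32_t loop = 0;

	while (req_dist_set) {
		if (req_dist_set & 1) {
			switch (1u << loop) {
			case RSS_L2_PAYLOAD:
				if (!l2++ && n < max)
					out[n++] = EXT_L2;
				break;
			case RSS_IPV4:
			case RSS_IPV6:
				if (!l3++ && n < max)
					out[n++] = EXT_L3;
				break;
			case RSS_TCP:
			case RSS_UDP:
				if (!l4++ && n < max)
					out[n++] = EXT_L4;
				break;
			case RSS_SCTP:
				if (!sctp++ && n < max)
					out[n++] = EXT_SCTP;
				break;
			default:
				fprintf(stderr, "unhandled dist bit %u\n", loop);
			}
		}
		req_dist_set >>= 1;
		loop++;
	}
	return n;	/* plays the role of kg_cfg->num_extracts */
}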
-+ /* Check for correct configuration */ -+ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS && -+ data->nb_rx_queues > 1) { -+ PMD_DRV_LOG(ERR, "Distribution is not enabled, " -+ "but Rx queues more than 1\n"); -+ return -1; -+ } -+ -+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { -+ /* Return in case number of Rx queues is 1 */ -+ if (data->nb_rx_queues == 1) -+ return 0; -+ ret = dpaa2_setup_flow_distribution(dev, -+ eth_conf->rx_adv_conf.rss_conf.rss_hf); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "dpaa2_setup_flow_distribution failed\n"); -+ return ret; -+ } -+ } -+ -+ return 0; -+ fail_tx_queue: -+ i -= 1; -+ while (i >= 0) { -+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; -+ rte_free(dpaa2_q->cscn); -+ } -+ return -1; -+} -+ -+static int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, -+ void *blist) -+{ -+ /* Function to attach a DPNI with a buffer pool list. Buffer pool list -+ * handle is passed in blist. -+ */ -+ int32_t retcode; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_pools_cfg bpool_cfg; -+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist; -+ struct dpni_buffer_layout layout; -+ int tot_size; -+ -+ /* ... rx buffer layout . -+ Check alignment for buffer layouts first*/ -+ -+ /* ... rx buffer layout ... */ -+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM; -+ tot_size = RTE_ALIGN_CEIL(tot_size, -+ DPAA2_PACKET_LAYOUT_ALIGN); -+ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; -+ -+ layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION; -+ retcode = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, -+ &layout); -+ if (retcode) { -+ PMD_DRV_LOG(ERR, "Err(%d) in setting rx buffer layout\n", retcode); -+ return retcode; -+ } -+ -+ /*Attach buffer pool to the network interface as described by the user*/ -+ bpool_cfg.num_dpbp = 1; -+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id; -+ bpool_cfg.pools[0].backup_pool = 0; -+ bpool_cfg.pools[0].buffer_size = -+ RTE_ALIGN_CEIL(bp_list->buf_pool.size, -+ 256 /*DPAA2_PACKET_LAYOUT_ALIGN*/); -+ -+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); -+ if (retcode != 0) { -+ PMD_DRV_LOG(ERR, "Error in attaching the buffer pool list" -+ "bpid = %d Error code = %d\n", -+ bpool_cfg.pools[0].dpbp_id, retcode); -+ return retcode; -+ } -+ -+ priv->bp_list = bp_list; -+ return 0; -+} -+ -+/* Function to setup RX flow information. It contains traffic class ID, -+ * flow ID, destination configuration etc. 
-+ */ -+static int -+dpaa2_rx_queue_setup(struct rte_eth_dev *dev, -+ uint16_t rx_queue_id, -+ uint16_t nb_rx_desc __rte_unused, -+ unsigned int socket_id __rte_unused, -+ const struct rte_eth_rxconf *rx_conf __rte_unused, -+ struct rte_mempool *mb_pool) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpaa2_queue *dpaa2_q; -+ struct dpni_queue_cfg cfg; -+ uint8_t tc_id, flow_id; -+ uint32_t bpid; -+ int ret; -+ -+ PMD_DRV_LOG(INFO, "dev =%p, queue =%d, pool = %p, conf =%p", -+ dev, rx_queue_id, mb_pool, rx_conf); -+ -+ if (!priv->bp_list || priv->bp_list->mp != mb_pool) { -+ RTE_VERIFY(mb_pool->pool_data); -+ bpid = mempool_to_bpid(mb_pool); -+ ret = dpaa2_attach_bp_list(priv, -+ bpid_info[bpid].bp_list); -+ if (ret) -+ return ret; -+ } -+ dpaa2_q = (struct dpaa2_queue *)dev->data->rx_queues[rx_queue_id]; -+ -+ /*Get the tc id and flow id from given VQ id*/ -+ tc_id = rx_queue_id / MAX_DIST_PER_TC; -+ flow_id = rx_queue_id % MAX_DIST_PER_TC; -+ memset(&cfg, 0, sizeof(struct dpni_queue_cfg)); -+ -+ cfg.options = cfg.options | DPNI_QUEUE_OPT_USER_CTX; -+ -+#ifdef DPAA2_STASHING -+ cfg.options = cfg.options | DPNI_QUEUE_OPT_FLC; -+#endif -+ -+ cfg.user_ctx = (uint64_t)(dpaa2_q); -+#ifdef DPAA2_STASHING -+ cfg.flc_cfg.flc_type = DPNI_FLC_STASH; -+ cfg.flc_cfg.frame_data_size = DPNI_STASH_SIZE_64B; -+ /* Enabling Annotation stashing */ -+ cfg.options |= DPNI_FLC_STASH_FRAME_ANNOTATION; -+ cfg.flc_cfg.options = DPNI_FLC_STASH_FRAME_ANNOTATION; -+#endif -+ ret = dpni_set_rx_flow(dpni, CMD_PRI_LOW, priv->token, -+ tc_id, flow_id, &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); -+ return -1; -+ } -+ return 0; -+} -+ -+static int -+dpaa2_tx_queue_setup(struct rte_eth_dev *dev, -+ uint16_t tx_queue_id, -+ uint16_t nb_tx_desc __rte_unused, -+ unsigned int socket_id __rte_unused, -+ const struct rte_eth_txconf *tx_conf __rte_unused) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) -+ dev->data->tx_queues[tx_queue_id]; -+ struct fsl_mc_io *dpni = priv->hw; -+ struct dpni_tx_flow_cfg cfg; -+ struct dpni_tx_conf_cfg tx_conf_cfg; -+#ifdef DPAA2_CGR_SUPPORT -+ struct dpni_congestion_notification_cfg cong_notif_cfg; -+#endif -+ uint32_t tc_idx; -+ int ret; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ /* Return if queue already configured */ -+ if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID) -+ return 0; -+ -+ memset(&cfg, 0, sizeof(struct dpni_tx_flow_cfg)); -+ cfg.l3_chksum_gen = 1; -+ cfg.options |= DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN; -+ cfg.l4_chksum_gen = 1; -+ cfg.options = DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; -+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_tx_conf_cfg)); -+ tx_conf_cfg.errors_only = TRUE; -+ -+ /* -+ if (action & DPAA2BUF_TX_CONF_REQUIRED) { -+ cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; -+ cfg.use_common_tx_conf_queue = -+ ((action & DPAA2BUF_TX_CONF_ERR_ON_COMMON_Q) ? 
-+ TRUE : FALSE); -+ tx_conf_cfg.errors_only = FALSE; -+ }*/ -+ -+ if (priv->num_tc == 1) -+ tc_idx = 0; -+ else -+ tc_idx = tx_queue_id; -+ -+ ret = dpni_set_tx_flow(dpni, CMD_PRI_LOW, priv->token, -+ &(dpaa2_q->flow_id), &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting the tx flow:" -+ "ErrorCode = %x\n", ret); -+ return -1; -+ } -+ /*Set tx-conf and error configuration*/ -+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token, -+ dpaa2_q->flow_id, &tx_conf_cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: " -+ "ErrorCode = %x", ret); -+ return -1; -+ } -+ -+ if (tx_queue_id == 0) { -+ /*Set tx-conf and error configuration*/ -+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_COMMON_TX_CONF, &tx_conf_cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: " -+ "ErrorCode = %x", ret); -+ return -1; -+ } -+ } -+ dpaa2_q->tc_index = tc_idx; -+ -+#ifdef DPAA2_CGR_SUPPORT -+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES; -+ /*Notify about congestion when the queue size is 128 frames with each \ -+ frame 64 bytes size*/ -+ cong_notif_cfg.threshold_entry = CONG_ENTER_THRESHOLD; -+ /*Notify that the queue is not congested when the number of frames in \ -+ the queue is below this thershold. -+ TODO: Check if this value is the optimum value for better performance*/ -+ cong_notif_cfg.threshold_exit = CONG_EXIT_THRESHOLD; -+ cong_notif_cfg.message_ctx = 0; -+ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; -+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; -+ cong_notif_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | -+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE; -+ -+ ret = dpni_set_tx_tc_congestion_notification(dpni, CMD_PRI_LOW, -+ priv->token, -+ tc_idx, &cong_notif_cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error in setting tx congestion notification " -+ "settings: ErrorCode = %x", ret); -+ return -1; -+ } -+#endif -+ return 0; -+} -+ -+void -+dpaa2_rx_queue_release(void *q) -+{ -+ printf("\n(%s) called for 1=%p\n", __func__, q); -+ return; -+} -+ -+void -+dpaa2_tx_queue_release(void *q) -+{ -+ printf("\n(%s) called for 1=%p\n", __func__, q); -+ return; -+} -+ -+static const uint32_t * -+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) -+{ -+ static const uint32_t ptypes[] = { -+ /*todo -= add more types */ -+ RTE_PTYPE_L2_ETHER, -+ RTE_PTYPE_L3_IPV4, -+ RTE_PTYPE_L3_IPV4_EXT, -+ RTE_PTYPE_L3_IPV6, -+ RTE_PTYPE_L3_IPV6_EXT, -+ RTE_PTYPE_L4_TCP, -+ RTE_PTYPE_L4_UDP, -+ RTE_PTYPE_L4_SCTP, -+ RTE_PTYPE_L4_ICMP, -+ RTE_PTYPE_UNKNOWN -+ }; -+ -+ if (dev->rx_pkt_burst == eth_dpaa2_prefetch_rx || -+ dev->rx_pkt_burst == eth_dpaa2_rx) -+ return ptypes; -+ return NULL; -+} -+ -+static int -+dpaa2_dev_start(struct rte_eth_dev *dev) -+{ -+ struct rte_eth_dev_data *data = dev->data; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpni_queue_attr cfg; -+ uint16_t qdid; -+ struct dpaa2_queue *dpaa2_q; -+ int ret, i, mask = 0; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ dev->data->dev_link.link_status = 1; -+ -+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure %d in enabling dpni %d device\n", -+ ret, priv->hw_id); -+ return ret; -+ } -+ -+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, &qdid); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); -+ return ret; -+ } -+ priv->qdid = qdid; -+ -+ for (i = 0; i < data->nb_rx_queues; i++) { -+ dpaa2_q = (struct 
dpaa2_queue *)data->rx_queues[i]; -+ ret = dpni_get_rx_flow(dpni, CMD_PRI_LOW, priv->token, -+ dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error to get flow " -+ "information Error code = %d\n", ret); -+ return ret; -+ } -+ dpaa2_q->fqid = cfg.fqid; -+ } -+ /* -+ * VLAN Offload Settings -+ */ -+ if (priv->options & DPNI_OPT_VLAN_FILTER) -+ mask = ETH_VLAN_FILTER_MASK; -+ -+ if (priv->options & DPNI_OPT_VLAN_MANIPULATION) -+ mask = ETH_VLAN_STRIP_MASK; -+ -+ if (mask) -+ dpaa2_vlan_offload_set(dev, mask); -+ -+ return 0; -+} -+ -+/********************************************************************* -+ * -+ * This routine disables all traffic on the adapter by issuing a -+ * global reset on the MAC. -+ * -+ **********************************************************************/ -+static void -+dpaa2_dev_stop(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ struct rte_eth_link link; -+ -+ dev->data->dev_link.link_status = 0; -+ -+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in disabling dpni %d device\n", priv->hw_id); -+ return; -+ } -+ -+ /* clear the recorded link status */ -+ memset(&link, 0, sizeof(link)); -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+} -+ -+static void -+dpaa2_dev_close(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ int ret; -+ struct rte_eth_link link; -+ -+ /*Function is reverse of dpaa2_dev_init. -+ * It does the following: -+ * 1. Detach a DPNI from attached resources i.e. buffer pools, dpbp_id. -+ * 2. Close the DPNI device -+ * 3. Free the allocated reqources. 
-+ */ -+ -+ /* Clean the device first */ -+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpni device with" -+ "error code %d\n", ret); -+ return; -+ } -+ -+ /*Close the device at underlying layer*/ -+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure closing dpni device with" -+ "error code %d\n", ret); -+ return; -+ } -+ -+ /*Free the allocated memory for ethernet private data and dpni*/ -+ priv->hw = NULL; -+ free(dpni); -+ -+ memset(&link, 0, sizeof(link)); -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+} -+ -+static void -+dpaa2_dev_promiscuous_enable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, TRUE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_promiscuous_disable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, FALSE); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to disable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_allmulticast_enable( -+ struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static void -+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); -+ if (ret < 0) -+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode"); -+ return; -+} -+ -+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -EINVAL; -+ } -+ -+ /* check that mtu is within the allowed range */ -+ if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) -+ return -EINVAL; -+ -+ /* Set the Max Rx frame length as 'mtu' + -+ * Maximum Ethernet header length */ -+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, -+ mtu + ETH_VLAN_HLEN); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "setting the max frame length failed"); -+ return -1; -+ } -+ if (priv->options & DPNI_OPT_IPF) { -+ ret = dpni_set_mtu(dpni, CMD_PRI_LOW, priv->token, mtu); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting the MTU failed"); -+ return -1; -+ } -+ } -+ -+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu); -+ return 0; -+} -+ -+static int -+dpaa2_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused, 
-+ struct rte_eth_fc_conf *fc_conf __rte_unused) -+{ -+ return 0; -+} -+static void -+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, -+ struct ether_addr *addr, -+ __rte_unused uint32_t index, -+ __rte_unused uint32_t pool) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Adding the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+static void -+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, -+ uint32_t index) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct rte_eth_dev_data *data = dev->data; -+ struct ether_addr *macaddr; -+ -+ macaddr = &data->mac_addrs[index]; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, macaddr->addr_bytes); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Removing the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+static void -+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, -+ struct ether_addr *addr) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Setting the MAC ADDR failed"); -+ } -+ -+ return; -+} -+ -+int dpaa2_dev_get_mac_addr(struct rte_eth_dev *dev, -+ struct ether_addr *addr) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return -EINVAL; -+ } -+ -+ ret = dpni_get_primary_mac_addr(dpni, CMD_PRI_LOW, -+ priv->token, addr->addr_bytes); -+ -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Getting the MAC ADDR failed"); -+ } -+ -+ return ret; -+} -+ -+/*int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+dpni_set_errors_behavior -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+ -+*/ -+ -+static int dpaa2_timestamp_enable(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ struct dpni_buffer_layout layout; -+ int ret; -+ -+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_timestamp = TRUE; -+ -+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Enabling timestamp for Rx failed with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { 
-+ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW, -+ priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx-conf with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_timestamp_disable(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct dpni_buffer_layout layout; -+ int ret; -+ -+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_timestamp = FALSE; -+ -+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Rx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW, -+ priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx-conf with" -+ "err code: %d", ret); -+ return ret; -+ } -+ -+ return ret; -+} -+ -+/* return 0 means link status changed, -1 means not changed */ -+static int -+dpaa2_dev_get_link_info(struct rte_eth_dev *dev, -+ int wait_to_complete __rte_unused) -+{ -+ int ret; -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ struct rte_eth_link link, old; -+ struct dpni_link_state state = {0}; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return 0; -+ } -+ memset(&old, 0, sizeof(old)); -+ rte_dpni_dev_atomic_read_link_status(dev, &old); -+ -+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); -+ if (ret < 0) { -+ PMD_DRV_LOG(ERR, "dpni_get_link_state"); -+ return 0; -+ } -+ -+ if (state.up == 0) { -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+ if (state.up == old.link_status) -+ return -1; -+ return 0; -+ } -+ link.link_status = state.up; -+ link.link_speed = state.rate; -+ -+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) -+ link.link_duplex = ETH_LINK_HALF_DUPLEX; -+ else -+ link.link_duplex = ETH_LINK_FULL_DUPLEX; -+ -+ rte_dpni_dev_atomic_write_link_status(dev, &link); -+ -+ if (link.link_status == old.link_status) -+ return -1; -+ -+ return 0; -+} -+ -+static -+void dpaa2_dev_stats_get(struct rte_eth_dev *dev, -+ struct rte_eth_stats *stats) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ int32_t retcode; -+ uint64_t value; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ if (!stats) { -+ PMD_DRV_LOG(ERR, "stats is NULL"); -+ return; -+ } -+ -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME, &value); -+ if (retcode) -+ goto error; -+ stats->ipackets = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BYTE, &value); -+ if (retcode) -+ goto error; -+ stats->ibytes = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DROP, &value); -+ if (retcode) -+ goto error; -+ stats->ierrors = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DISCARD, &value); -+ if (retcode) -+ goto error; -+ stats->ierrors = stats->ierrors + value; -+ retcode = 
dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME, &value); -+ if (retcode) -+ goto error; -+ stats->opackets = value; -+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_BYTE, &value); -+ if (retcode) -+ goto error; -+ stats->obytes = value; -+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME_DISCARD, &value); -+ if (retcode) -+ goto error; -+ stats->oerrors = value; -+ -+ return; -+ -+error: -+ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode); -+ return; -+}; -+ -+static -+void dpaa2_dev_stats_reset(struct rte_eth_dev *dev) -+{ -+ struct dpaa2_dev_priv *priv = dev->data->dev_private; -+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; -+ -+ int32_t retcode; -+ -+ if (dpni == NULL) { -+ PMD_DRV_LOG(ERR, "dpni is NULL"); -+ return; -+ } -+ -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BCAST_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_BCAST_BYTES, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_MCAST_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_MCAST_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DROP, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_ING_FRAME_DISCARD, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_BYTE, 0); -+ if (retcode) -+ goto error; -+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token, -+ DPNI_CNT_EGR_FRAME_DISCARD, 0); -+ if (retcode) -+ goto error; -+ -+ return; -+ -+error: -+ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode); -+ return; -+}; -+ -+static struct eth_dev_ops ops = { -+ .dev_configure = dpaa2_eth_dev_configure, -+ .dev_start = dpaa2_dev_start, -+ .dev_stop = dpaa2_dev_stop, -+ .dev_close = dpaa2_dev_close, -+ .promiscuous_enable = dpaa2_dev_promiscuous_enable, -+ .promiscuous_disable = dpaa2_dev_promiscuous_disable, -+ .allmulticast_enable = dpaa2_dev_allmulticast_enable, -+ .allmulticast_disable = dpaa2_dev_allmulticast_disable, -+ .dev_set_link_up = NULL, -+ .dev_set_link_down = NULL, -+ .link_update = dpaa2_dev_get_link_info, -+ .stats_get = dpaa2_dev_stats_get, -+ .stats_reset = dpaa2_dev_stats_reset, -+ .dev_infos_get = dpaa2_eth_dev_info, -+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, -+ .mtu_set = dpaa2_dev_mtu_set, -+ .vlan_filter_set = dpaa2_vlan_filter_set, -+ .vlan_tpid_set = NULL, -+ .vlan_offload_set = dpaa2_vlan_offload_set, -+ .vlan_strip_queue_set = NULL, -+ .vlan_pvid_set = NULL, -+ .rx_queue_setup = dpaa2_rx_queue_setup, -+ .rx_queue_release = dpaa2_rx_queue_release, -+ .tx_queue_setup = dpaa2_tx_queue_setup, -+ .tx_queue_release = dpaa2_tx_queue_release, -+ .dev_led_on = NULL, -+ .dev_led_off = NULL, -+ .set_queue_rate_limit = NULL, -+ .flow_ctrl_get = NULL, -+ .flow_ctrl_set = dpaa2_flow_ctrl_set, -+ 
.priority_flow_ctrl_set = NULL, -+ .mac_addr_add = dpaa2_dev_add_mac_addr, -+ .mac_addr_remove = dpaa2_dev_remove_mac_addr, -+ .rxq_info_get = NULL, -+ .txq_info_get = NULL, -+ .timesync_enable = dpaa2_timestamp_enable, -+ .timesync_disable = dpaa2_timestamp_disable, -+ .mac_addr_set = dpaa2_dev_set_mac_addr, -+}; -+ -+static int -+dpaa2_dev_init(struct rte_eth_dev *eth_dev) -+{ -+ struct rte_eth_dev_data *data = eth_dev->data; -+ struct fsl_mc_io *dpni_dev; -+ struct dpni_attr attr; -+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; -+ struct dpni_buffer_layout layout; -+ int i, ret, hw_id = eth_dev->pci_dev->addr.devid; -+ struct dpni_extended_cfg *ext_cfg = NULL; -+ int tot_size; -+ -+ PMD_INIT_FUNC_TRACE(); -+ -+ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io)); -+ if (!dpni_dev) { -+ PMD_DRV_LOG(ERR, "malloc failed for dpni device\n"); -+ return -1; -+ } -+ -+ dpni_dev->regs = mcp_ptr_list[0]; -+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in opening dpni@%d device with" -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ /* Clean the device first */ -+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure cleaning dpni@%d device with" -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ ext_cfg = (struct dpni_extended_cfg *)rte_malloc(NULL, 256, -+ RTE_CACHE_LINE_SIZE); -+ if (!ext_cfg) { -+ PMD_DRV_LOG(ERR, "No data memory\n"); -+ return -1; -+ } -+ attr.ext_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(ext_cfg)); -+ -+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Failure in getting dpni@%d attribute, " -+ "error code %d\n", hw_id, ret); -+ return -1; -+ } -+ -+ priv->num_tc = attr.max_tcs; -+ for (i = 0; i < attr.max_tcs; i++) { -+ priv->num_dist_per_tc[i] = ext_cfg->tc_cfg[i].max_dist; -+ priv->nb_rx_queues += priv->num_dist_per_tc[i]; -+ break; -+ } -+ if (attr.max_tcs == 1) -+ priv->nb_tx_queues = attr.max_senders; -+ else -+ priv->nb_tx_queues = attr.max_tcs; -+ PMD_DRV_LOG(INFO, "num_tc %d", priv->num_tc); -+ PMD_DRV_LOG(INFO, "nb_rx_queues %d", priv->nb_rx_queues); -+ -+ eth_dev->data->nb_rx_queues = priv->nb_rx_queues; -+ eth_dev->data->nb_tx_queues = priv->nb_tx_queues; -+ -+ priv->hw = dpni_dev; -+ priv->hw_id = hw_id; -+ priv->options = attr.options; -+ -+ priv->max_unicast_filters = attr.max_unicast_filters; -+ priv->max_multicast_filters = attr.max_multicast_filters; -+ -+ if (attr.options & DPNI_OPT_VLAN_FILTER) -+ priv->max_vlan_filters = attr.max_vlan_filters; -+ else -+ priv->max_vlan_filters = 0; -+ -+ ret = dpaa2_alloc_rx_tx_queues(eth_dev); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); -+ return -1; -+ } -+ -+ data->mac_addrs = (struct ether_addr *)malloc(sizeof(struct ether_addr)); -+ -+ /* Allocate memory for storing MAC addresses */ -+ eth_dev->data->mac_addrs = rte_zmalloc("dpni", -+ ETHER_ADDR_LEN * attr.max_unicast_filters, 0); -+ if (eth_dev->data->mac_addrs == NULL) { -+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to " -+ "store MAC addresses", -+ ETHER_ADDR_LEN * attr.max_unicast_filters); -+ return -ENOMEM; -+ } -+ -+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, -+ priv->token, -+ (uint8_t *)(data->mac_addrs[0].addr_bytes)); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "DPNI get mac address failed:" -+ " Error Code = %d\n", ret); -+ return -1; -+ } -+ -+ PMD_DRV_LOG(INFO, "Adding Broadcast Address..."); -+ 
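Both dpaa2_attach_bp_list() earlier and the Rx buffer-layout setup just below compute the data head-room the same way: hardware reserve plus mbuf headroom, rounded up to the packet-layout alignment, minus the private annotation and hardware annotation areas. The following is a standalone sketch of that arithmetic only; every numeric constant here is an assumed illustrative value, not taken from the DPAA2 headers.

/* Worked example of the Rx data head-room calculation. */
#include <stdio.h>

#define ALIGN_CEIL(v, a)     (((v) + (a) - 1) / (a) * (a))

#define PKTMBUF_HEADROOM     256  /* assumed RTE_PKTMBUF_HEADROOM */
#define HW_BUF_RESERVE       0    /* assumed DPAA2_HW_BUF_RESERVE */
#define FD_PTA_SIZE          64   /* assumed DPAA2_FD_PTA_SIZE */
#define MBUF_HW_ANNOTATION   64   /* assumed DPAA2_MBUF_HW_ANNOTATION */
#define PACKET_LAYOUT_ALIGN  64   /* assumed DPAA2_PACKET_LAYOUT_ALIGN */

int main(void)
{
	int tot_size = HW_BUF_RESERVE + PKTMBUF_HEADROOM;

	tot_size = ALIGN_CEIL(tot_size, PACKET_LAYOUT_ALIGN);
	printf("total reserve      : %d bytes\n", tot_size);
	printf("dpni data_head_room: %d bytes\n",
	       tot_size - FD_PTA_SIZE - MBUF_HW_ANNOTATION);
	return 0;
}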
memset(data->mac_addrs[1].addr_bytes, 0xff, ETH_ADDR_LEN); -+ ret = dpni_add_mac_addr(dpni_dev, CMD_PRI_LOW, -+ priv->token, -+ (uint8_t *)(data->mac_addrs[1].addr_bytes)); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "DPNI set broadcast mac address failed:" -+ " Error Code = %0x\n", ret); -+ return -1; -+ } -+ -+ /* ... rx buffer layout ... */ -+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM; -+ tot_size = RTE_ALIGN_CEIL(tot_size, -+ DPAA2_PACKET_LAYOUT_ALIGN); -+ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP | -+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | -+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | -+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; -+ -+ layout.pass_frame_status = 1; -+ layout.data_head_room = tot_size -+ - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION; -+ layout.private_data_size = DPAA2_FD_PTA_SIZE; -+ layout.pass_timestamp = 1; -+ layout.pass_parser_result = 1; -+ PMD_DRV_LOG(INFO, "Tot_size = %d, head room = %d, private = %d", -+ tot_size, layout.data_head_room, layout.private_data_size); -+ ret = dpni_set_rx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, -+ &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Err(%d) in setting rx buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* ... tx buffer layout ... */ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_frame_status = 1; -+ layout.pass_timestamp = 1; -+ ret = dpni_set_tx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error (%d) in setting tx buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* ... tx-conf and error buffer layout ... */ -+ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); -+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ layout.pass_frame_status = 1; -+ layout.pass_timestamp = 1; -+ ret = dpni_set_tx_conf_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout); -+ if (ret) { -+ PMD_DRV_LOG(ERR, "Error (%d) in setting tx-conf buffer layout\n", ret); -+ return -1; -+ } -+ -+ /* TODO - Set the MTU if required */ -+ -+ eth_dev->dev_ops = &ops; -+ eth_dev->rx_pkt_burst = eth_dpaa2_prefetch_rx;/*eth_dpaa2_rx;*/ -+ eth_dev->tx_pkt_burst = eth_dpaa2_tx; -+ -+ rte_free(ext_cfg); -+ -+ return 0; -+} -+ -+static struct eth_driver rte_dpaa2_dpni = { -+ { -+ .name = "rte_dpaa2_dpni", -+ .id_table = pci_id_dpaa2_map, -+ }, -+ .eth_dev_init = dpaa2_dev_init, -+ .dev_private_size = sizeof(struct dpaa2_dev_priv), -+}; -+ -+static int -+rte_pmd_dpaa2_devinit( -+ const char *name __rte_unused, -+ const char *params __rte_unused) -+{ -+ PMD_DRV_LOG(INFO, "Initializing dpaa2_pmd for %s\n", name); -+ rte_eth_driver_register(&rte_dpaa2_dpni); -+ -+ return 0; -+} -+ -+static struct rte_driver pmd_dpaa2_drv = { -+ .name = "dpaa2_pmd", -+ .type = PMD_PDEV, -+ .init = rte_pmd_dpaa2_devinit, -+}; -+ -+PMD_REGISTER_DRIVER(pmd_dpaa2_drv, dpaa2); -diff --git a/drivers/net/dpaa2/rte_eth_dpni_annot.h b/drivers/net/dpaa2/rte_eth_dpni_annot.h -new file mode 100644 -index 0000000..0c3ae82 ---- /dev/null -+++ b/drivers/net/dpaa2/rte_eth_dpni_annot.h -@@ -0,0 +1,310 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+/** -+ * @file -+ * -+ * DPNI packet parse results - implementation internal -+ */ -+ -+#ifndef RTE_ETH_DPNI_ANNOT_H_ -+#define RTE_ETH_DPNI_ANNOT_H_ -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* Annotation valid bits in FD FRC */ -+#define DPAA2_FD_FRC_FASV 0x8000 -+#define DPAA2_FD_FRC_FAEADV 0x4000 -+#define DPAA2_FD_FRC_FAPRV 0x2000 -+#define DPAA2_FD_FRC_FAIADV 0x1000 -+#define DPAA2_FD_FRC_FASWOV 0x0800 -+#define DPAA2_FD_FRC_FAICFDV 0x0400 -+ -+/* Annotation bits in FD CTRL */ -+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ -+#define DPAA2_FD_CTRL_PTA 0x00800000 -+#define DPAA2_FD_CTRL_PTV1 0x00400000 -+ -+/* Frame annotation status */ -+struct dpaa2_fas { -+ uint8_t reserved; -+ uint8_t ppid; -+ __le16 ifpid; -+ __le32 status; -+} __packed; -+ -+/** -+ * Internal Packet annotation header -+ */ -+struct pkt_annotation { -+ /**< word1: Frame Annotation Status (8 bytes)*/ -+ uint64_t word1; -+ /**< word2: Time Stamp (8 bytes)*/ -+ uint64_t word2; -+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/ -+ uint64_t word3; -+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */ -+ uint64_t word4; -+ /**< word5: -+ ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset + -+ LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n + -+ LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) -+ */ -+ uint64_t word5; -+ /**< word6: -+ PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1 -+ + IPOffset_norMInEncapO + GREOffset + L4Offset + -+ GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) -+ */ -+ uint64_t word6; -+ /**< word7: -+ RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + IPv6FragOffset + -+ GrossRunningSum + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes) -+ */ -+ uint64_t word7; -+ /**< word8: -+ ParseErrorcode + Soft Parsing Context (1 + 7 bytes) -+ */ -+ uint64_t word8; /**< Layer 4 length */ -+}; -+ -+/** -+ * Internal Macros to get/set Packet annotation header -+ */ -+ -+/** General Macro to define a 
particular bit position*/ -+#define BIT_POS(x) ((uint64_t)1 << ((x))) -+/** Set a bit in the variable */ -+#define BIT_SET_AT_POS(var, pos) (var |= pos) -+/** Reset the bit in the variable */ -+#define BIT_RESET_AT_POS(var, pos) (var &= ~(pos)) -+/** Check the bit is set in the variable */ -+#define BIT_ISSET_AT_POS(var, pos) ((var & pos) ? 1 : 0) -+/** -+ * Macrso to define bit position in word3 -+ */ -+#define NEXT_HDR(var) ((uint64_t)var & 0xFFFF000000000000) -+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16) -+#define FAF_EXTN_RESERVED(var) ((uint64_t)var & 0x00007FFF00000000) -+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)var & 0x00000000FF000000) -+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23) -+#define PARSING_ERROR BIT_POS(22) -+#define L2_ETH_MAC_PRESENT BIT_POS(21) -+#define L2_ETH_MAC_UNICAST BIT_POS(20) -+#define L2_ETH_MAC_MULTICAST BIT_POS(19) -+#define L2_ETH_MAC_BROADCAST BIT_POS(18) -+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17) -+#define L2_ETH_FCOE_PRESENT BIT_POS(16) -+#define L2_ETH_FIP_PRESENT BIT_POS(15) -+#define L2_ETH_PARSING_ERROR BIT_POS(14) -+#define L2_LLC_SNAP_PRESENT BIT_POS(13) -+#define L2_UNKNOWN_LLC_OUI BIT_POS(12) -+#define L2_LLC_SNAP_ERROR BIT_POS(11) -+#define L2_VLAN_1_PRESENT BIT_POS(10) -+#define L2_VLAN_N_PRESENT BIT_POS(9) -+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8) -+#define L2_VLAN_PARSING_ERROR BIT_POS(7) -+#define L2_PPPOE_PPP_PRESENT BIT_POS(6) -+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5) -+#define L2_MPLS_1_PRESENT BIT_POS(4) -+#define L2_MPLS_N_PRESENT BIT_POS(3) -+#define L2_MPLS_PARSING_ERROR BIT_POS(2) -+#define L2_ARP_PRESENT BIT_POS(1) -+#define L2_ARP_PARSING_ERROR BIT_POS(0) -+/** -+ * Macrso to define bit position in word4 -+ */ -+#define L2_UNKNOWN_PROTOCOL BIT_POS(63) -+#define L2_SOFT_PARSING_ERROR BIT_POS(62) -+#define L3_IPV4_1_PRESENT BIT_POS(61) -+#define L3_IPV4_1_UNICAST BIT_POS(60) -+#define L3_IPV4_1_MULTICAST BIT_POS(59) -+#define L3_IPV4_1_BROADCAST BIT_POS(58) -+#define L3_IPV4_N_PRESENT BIT_POS(57) -+#define L3_IPV4_N_UNICAST BIT_POS(56) -+#define L3_IPV4_N_MULTICAST BIT_POS(55) -+#define L3_IPV4_N_BROADCAST BIT_POS(54) -+#define L3_IPV6_1_PRESENT BIT_POS(53) -+#define L3_IPV6_1_UNICAST BIT_POS(52) -+#define L3_IPV6_1_MULTICAST BIT_POS(51) -+#define L3_IPV6_N_PRESENT BIT_POS(50) -+#define L3_IPV6_N_UNICAST BIT_POS(49) -+#define L3_IPV6_N_MULTICAST BIT_POS(48) -+#define L3_IP_1_OPT_PRESENT BIT_POS(47) -+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46) -+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45) -+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44) -+#define L3_IP_1_PARSING_ERROR BIT_POS(43) -+#define L3_IP_N_OPT_PRESENT BIT_POS(42) -+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41) -+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40) -+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39) -+#define L3_PROTO_ICMP_PRESENT BIT_POS(38) -+#define L3_PROTO_IGMP_PRESENT BIT_POS(37) -+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36) -+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35) -+#define L3_IP_N_PARSING_ERROR BIT_POS(34) -+#define L3_MIN_ENCAP_PRESENT BIT_POS(33) -+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32) -+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31) -+#define L3_PROTO_GRE_PRESENT BIT_POS(30) -+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29) -+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28) -+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27) -+#define L3_SOFT_PARSING_ERROR BIT_POS(26) -+#define L3_PROTO_UDP_PRESENT BIT_POS(25) -+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24) -+#define L3_PROTO_TCP_PRESENT BIT_POS(23) -+#define 
L3_PROTO_TCP_OPT_PRESENT BIT_POS(22) -+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21) -+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20) -+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19) -+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18) -+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17) -+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16) -+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15) -+#define L3_PROTO_SCTP_PRESENT BIT_POS(14) -+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13) -+#define L3_PROTO_DCCP_PRESENT BIT_POS(12) -+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11) -+#define L4_UNKNOWN_PROTOCOL BIT_POS(10) -+#define L4_SOFT_PARSING_ERROR BIT_POS(9) -+#define L3_PROTO_GTP_PRESENT BIT_POS(8) -+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7) -+#define L3_PROTO_ESP_PRESENT BIT_POS(6) -+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5) -+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4) -+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3) -+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2) -+#define L5_SOFT_PARSING_ERROR BIT_POS(1) -+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0) -+ -+/** -+ * Macros to get values in word5 -+ */ -+#define SHIM_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000) -+#define SHIM_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000) -+#define IP_PID_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000) -+#define ETH_OFFSET(var) ((uint64_t)var & 0x000000FF00000000) -+#define LLC_SNAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000) -+#define VLAN_TCI_OFFSET_1(var) ((uint64_t)var & 0x0000000000FF0000) -+#define VLAN_TCI_OFFSET_N(var) ((uint64_t)var & 0x000000000000FF00) -+#define LAST_ETYPE_OFFSET(var) ((uint64_t)var & 0x00000000000000FF) -+ -+/** -+ * Macros to get values in word6 -+ */ -+#define PPPOE_OFFSET(var) ((uint64_t)var & 0xFF00000000000000) -+#define MPLS_OFFSET_1(var) ((uint64_t)var & 0x00FF000000000000) -+#define MPLS_OFFSET_N(var) ((uint64_t)var & 0x0000FF0000000000) -+#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)var & 0x000000FF00000000) -+#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000) -+#define GRE_OFFSET(var) ((uint64_t)var & 0x0000000000FF0000) -+#define L4_OFFSET(var) ((uint64_t)var & 0x000000000000FF00) -+#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)var & 0x00000000000000FF) -+ -+/** -+ * Macros to get values in word7 -+ */ -+#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000) -+#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000) -+#define NEXT_HDR_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000) -+#define IPV6_FRAG_OFFSET(var) ((uint64_t)var & 0x000000FF00000000) -+#define GROSS_RUNNING_SUM(var) ((uint64_t)var & 0x00000000FFFF0000) -+#define RUNNING_SUM(var) ((uint64_t)var & 0x000000000000FFFF) -+ -+/** -+ * Macros to get values in word8 -+ */ -+#define PARSE_ERROR_CODE(var) ((uint64_t)var & 0xFF00000000000000) -+#define SOFT_PARSING_CONTEXT(var) ((uint64_t)var & 0x00FFFFFFFFFFFFFF) -+ -+/* Debug frame, otherwise supposed to be discarded */ -+#define DPAA2_ETH_FAS_DISC 0x80000000 -+/* MACSEC frame */ -+#define DPAA2_ETH_FAS_MS 0x40000000 -+#define DPAA2_ETH_FAS_PTP 0x08000000 -+/* Ethernet multicast frame */ -+#define DPAA2_ETH_FAS_MC 0x04000000 -+/* Ethernet broadcast frame */ -+#define DPAA2_ETH_FAS_BC 0x02000000 -+#define DPAA2_ETH_FAS_KSE 0x00040000 -+#define DPAA2_ETH_FAS_EOFHE 0x00020000 -+#define DPAA2_ETH_FAS_MNLE 0x00010000 -+#define DPAA2_ETH_FAS_TIDE 0x00008000 -+#define DPAA2_ETH_FAS_PIEE 0x00004000 -+/* Frame length error */ -+#define DPAA2_ETH_FAS_FLE 0x00002000 -+/* 
Frame physical error; our favourite pastime */ -+#define DPAA2_ETH_FAS_FPE 0x00001000 -+#define DPAA2_ETH_FAS_PTE 0x00000080 -+#define DPAA2_ETH_FAS_ISP 0x00000040 -+#define DPAA2_ETH_FAS_PHE 0x00000020 -+#define DPAA2_ETH_FAS_BLE 0x00000010 -+/* L3 csum validation performed */ -+#define DPAA2_ETH_FAS_L3CV 0x00000008 -+/* L3 csum error */ -+#define DPAA2_ETH_FAS_L3CE 0x00000004 -+/* L4 csum validation performed */ -+#define DPAA2_ETH_FAS_L4CV 0x00000002 -+/* L4 csum error */ -+#define DPAA2_ETH_FAS_L4CE 0x00000001 -+ -+/* These bits always signal errors */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE | \ -+ DPAA2_ETH_FAS_PIEE | \ -+ DPAA2_ETH_FAS_FLE | \ -+ DPAA2_ETH_FAS_FPE | \ -+ DPAA2_ETH_FAS_PTE | \ -+ DPAA2_ETH_FAS_ISP | \ -+ DPAA2_ETH_FAS_PHE | \ -+ DPAA2_ETH_FAS_BLE | \ -+ DPAA2_ETH_FAS_L3CE | \ -+ DPAA2_ETH_FAS_L4CE) -+/* Unsupported features in the ingress */ -+#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS -+/* Tx errors */ -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \ -+ DPAA2_ETH_FAS_EOFHE | \ -+ DPAA2_ETH_FAS_MNLE | \ -+ DPAA2_ETH_FAS_TIDE) -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif -diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h -index 857dc3e..3dc9544 100644 ---- a/lib/librte_eal/common/eal_private.h -+++ b/lib/librte_eal/common/eal_private.h -@@ -325,4 +325,11 @@ int rte_eal_hugepage_init(void); - */ - int rte_eal_hugepage_attach(void); - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+/** -+ * Initialize any soc init related functions if any before thread creation -+ */ -+int rte_eal_soc_pre_init(void); -+#endif -+ - #endif /* _EAL_PRIVATE_H_ */ -diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile -index 182729c..ed5be74 100644 ---- a/lib/librte_eal/linuxapp/eal/Makefile -+++ b/lib/librte_eal/linuxapp/eal/Makefile -@@ -76,6 +76,10 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_interrupts.c - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_alarm.c -+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y) -+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_soc.c -+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_fsl_mc.c -+endif - ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y) - SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_ivshmem.c - endif -diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c -index 3fb2188..832c252 100644 ---- a/lib/librte_eal/linuxapp/eal/eal.c -+++ b/lib/librte_eal/linuxapp/eal/eal.c -@@ -814,6 +814,11 @@ rte_eal_init(int argc, char **argv) - if (rte_eal_tailqs_init() < 0) - rte_panic("Cannot init tail queues for objects\n"); - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (rte_eal_soc_pre_init() < 0) -+ rte_panic("Cannot pre init soc\n"); -+#endif -+ - #ifdef RTE_LIBRTE_IVSHMEM - if (rte_eal_ivshmem_obj_init() < 0) - rte_panic("Cannot init IVSHMEM objects\n"); -diff --git a/lib/librte_eal/linuxapp/eal/eal_soc.c b/lib/librte_eal/linuxapp/eal/eal_soc.c -new file mode 100644 -index 0000000..1595f68 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_soc.c -@@ -0,0 +1,67 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor, Inc or the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "eal_private.h" -+ -+#ifdef RTE_LIBRTE_DPAA2_PMD -+#include "eal_vfio_fsl_mc.h" -+#endif -+ -+#if (defined RTE_LIBRTE_DPAA_PMD) -+extern int usdpaa_pre_rte_eal_init(void); -+#endif -+ -+/* Initialize any soc init related functions if any before thread creation*/ -+int -+rte_eal_soc_pre_init(void) -+{ -+#ifdef RTE_LIBRTE_DPAA2_PMD -+ if (rte_eal_dpaa2_init() < 0) -+ RTE_LOG(WARNING, EAL, "Cannot init FSL_MC SCAN\n"); -+#endif -+#if (defined RTE_LIBRTE_DPAA_PMD) -+ if (usdpaa_pre_rte_eal_init()) -+ RTE_LOG(WARNING, EAL, "Cannot init FSL_DPAA \n"); -+#endif -+ return 0; -+} -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c -new file mode 100644 -index 0000000..0ddaef9 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c -@@ -0,0 +1,650 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rte_pci.h" -+#include "eal_vfio.h" -+ -+#include -+ -+#include "eal_vfio_fsl_mc.h" -+ -+#include "rte_pci_dev_ids.h" -+#include "eal_filesystem.h" -+#include "eal_private.h" -+ -+#ifndef VFIO_MAX_GROUPS -+#define VFIO_MAX_GROUPS 64 -+#endif -+ -+/* #define DPAA2_STAGE2_STASHING */ -+ -+/** Pathname of FSL-MC devices directory. */ -+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices" -+ -+/* Number of VFIO containers & groups with in */ -+static struct fsl_vfio_group vfio_groups[VFIO_MAX_GRP]; -+static struct fsl_vfio_container vfio_containers[VFIO_MAX_CONTAINERS]; -+static char *ls2bus_container; -+static int container_device_fd; -+static uint32_t *msi_intr_vaddr; -+void *(*mcp_ptr_list); -+static uint32_t mcp_id; -+ -+static int vfio_connect_container(struct fsl_vfio_group *vfio_group) -+{ -+ struct fsl_vfio_container *container; -+ int i, fd, ret; -+ -+ /* Try connecting to vfio container already created */ -+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) { -+ container = &vfio_containers[i]; -+ if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { -+ RTE_LOG(ERR, EAL, "Container pre-exists with FD[0x%x]" -+ " for this group\n", container->fd); -+ vfio_group->container = container; -+ return 0; -+ } -+ } -+ -+ /* Opens main vfio file descriptor which represents the "container" */ -+ fd = open("/dev/vfio/vfio", O_RDWR); -+ if (fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: failed to open /dev/vfio/vfio\n"); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_GET_API_VERSION); -+ if (ret != VFIO_API_VERSION) { -+ RTE_LOG(ERR, EAL, "vfio: supported vfio version: %d, " -+ "reported version: %d", VFIO_API_VERSION, ret); -+ close(fd); -+ return -EINVAL; -+ } -+#ifndef DPAA2_STAGE2_STASHING -+ /* Check whether support for SMMU type IOMMU prresent or not */ -+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) { -+ /* Connect group to container */ -+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n"); -+ close(fd); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu for container:\n"); -+ close(fd); -+ return -errno; -+ } -+ } else { -+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU\n"); -+ close(fd); -+ return -EINVAL; -+ } -+#else -+ /* Check whether support for SMMU type IOMMU stage 2 present or not */ -+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_NESTING_IOMMU)) { -+ /* Connect group to container */ -+ ret = 
ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n"); -+ close(fd); -+ return -errno; -+ } -+ -+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_NESTING_IOMMU); -+ if (ret) { -+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu-2 for container:\n"); -+ close(fd); -+ return -errno; -+ } -+ } else { -+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU-2\n"); -+ close(fd); -+ return -EINVAL; -+ } -+#endif -+ container = NULL; -+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) { -+ if (vfio_containers[i].used) -+ continue; -+ RTE_LOG(ERR, EAL, "DPAA2-Unused container at index %d\n", i); -+ container = &vfio_containers[i]; -+ } -+ if (!container) { -+ RTE_LOG(ERR, EAL, "vfio error: No Free Container Found\n"); -+ close(fd); -+ return -ENOMEM; -+ } -+ -+ container->used = 1; -+ container->fd = fd; -+ container->group_list[container->index] = vfio_group; -+ vfio_group->container = container; -+ container->index++; -+ return 0; -+} -+ -+static int vfio_map_irq_region(struct fsl_vfio_group *group) -+{ -+ int ret; -+ unsigned long *vaddr = NULL; -+ struct vfio_iommu_type1_dma_map map = { -+ .argsz = sizeof(map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ .vaddr = 0x6030000, -+ .iova = 0x6030000, -+ .size = 0x1000, -+ }; -+ -+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE | -+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000); -+ if (vaddr == MAP_FAILED) { -+ RTE_LOG(ERR, EAL, " mapping GITS region (errno = %d)", errno); -+ return -errno; -+ } -+ -+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64); -+ map.vaddr = (unsigned long)vaddr; -+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map); -+ if (ret == 0) -+ return 0; -+ -+ RTE_LOG(ERR, EAL, "vfio_map_irq_region fails (errno = %d)", errno); -+ return -errno; -+} -+ -+int vfio_dmamap_mem_region(uint64_t vaddr, -+ uint64_t iova, -+ uint64_t size) -+{ -+ struct fsl_vfio_group *group; -+ struct vfio_iommu_type1_dma_map dma_map = { -+ .argsz = sizeof(dma_map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ }; -+ -+ dma_map.vaddr = vaddr; -+ dma_map.size = size; -+ dma_map.iova = iova; -+ -+ /* SET DMA MAP for IOMMU */ -+ group = &vfio_groups[0]; -+ if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) { -+ RTE_LOG(ERR, EAL, "SWP: VFIO_IOMMU_MAP_DMA API Error %d.\n", errno); -+ return -1; -+ } -+ return 0; -+} -+ -+static int32_t setup_dmamap(void) -+{ -+ int ret; -+ struct fsl_vfio_group *group; -+ struct vfio_iommu_type1_dma_map dma_map = { -+ .argsz = sizeof(struct vfio_iommu_type1_dma_map), -+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, -+ }; -+ -+ int i; -+ const struct rte_memseg *memseg; -+ -+ for (i = 0; i < RTE_MAX_MEMSEG; i++) { -+ memseg = rte_eal_get_physmem_layout(); -+ if (memseg == NULL) { -+ RTE_LOG(ERR, EAL, -+ "\nError Cannot get physical layout\n"); -+ return -ENODEV; -+ } -+ -+ if (memseg[i].addr == NULL && memseg[i].len == 0) { -+ break; -+ } -+ -+ dma_map.size = memseg[i].len; -+ dma_map.vaddr = memseg[i].addr_64; -+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -+ dma_map.iova = memseg[i].phys_addr; -+#else -+ dma_map.iova = dma_map.vaddr; -+#endif -+ -+ /* SET DMA MAP for IOMMU */ -+ group = &vfio_groups[0]; -+ -+ printf("-->Initial SHM Virtual ADDR %llX\n", dma_map.vaddr); -+ printf("-----> DMA size 0x%llX\n", dma_map.size); -+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); -+ if (ret) { -+ RTE_LOG(ERR, EAL, -+ "\nErr: VFIO_IOMMU_MAP_DMA API Error %d.\n", -+ errno); -+ 
return ret; -+ } -+ printf("-----> dma_map.vaddr = 0x%llX\n", dma_map.vaddr); -+ } -+ -+ /* TODO - This is a W.A. as VFIO currently does not add the mapping of -+ the interrupt region to SMMU. This should be removed once the -+ support is added in the Kernel. -+ */ -+ vfio_map_irq_region(group); -+ -+ return 0; -+} -+ -+static int vfio_set_group(struct fsl_vfio_group *group, int groupid) -+{ -+ char path[PATH_MAX]; -+ struct vfio_group_status status = { .argsz = sizeof(status) }; -+ -+ /* Open the VFIO file corresponding to the IOMMU group */ -+ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); -+ -+ group->fd = open(path, O_RDWR); -+ if (group->fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error opening %s\n", path); -+ return -1; -+ } -+ -+ /* Test & Verify that group is VIABLE & AVAILABLE */ -+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { -+ RTE_LOG(ERR, EAL, "vfio: error getting group status\n"); -+ close(group->fd); -+ return -1; -+ } -+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { -+ RTE_LOG(ERR, EAL, "vfio: group not viable\n"); -+ close(group->fd); -+ return -1; -+ } -+ /* Since Group is VIABLE, Store the groupid */ -+ group->groupid = groupid; -+ -+ /* Now connect this IOMMU group to given container */ -+ if (vfio_connect_container(group)) { -+ RTE_LOG(ERR, EAL, -+ "vfio: error sonnecting container with group %d\n", -+ groupid); -+ close(group->fd); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int32_t setup_vfio_grp(char *vfio_container) -+{ -+ char path[PATH_MAX]; -+ char iommu_group_path[PATH_MAX], *group_name; -+ struct fsl_vfio_group *group = NULL; -+ struct stat st; -+ int groupid; -+ int ret, len, i; -+ -+ printf("\tProcessing Container = %s\n", vfio_container); -+ sprintf(path, "/sys/bus/fsl-mc/devices/%s", vfio_container); -+ /* Check whether ls-container exists or not */ -+ printf("\tcontainer device path = %s\n", path); -+ if (stat(path, &st) < 0) { -+ RTE_LOG(ERR, EAL, "vfio: Error (%d) getting FSL-MC device (%s)\n", -+ errno, path); -+ return -errno; -+ } -+ -+ /* DPRC container exists. 
NOw checkout the IOMMU Group */ -+ strncat(path, "/iommu_group", sizeof(path) - strlen(path) - 1); -+ -+ len = readlink(path, iommu_group_path, PATH_MAX); -+ if (len == -1) { -+ RTE_LOG(ERR, EAL, "\tvfio: error no iommu_group for device\n"); -+ RTE_LOG(ERR, EAL, "\t%s: len = %d, errno = %d\n", -+ path, len, errno); -+ return -errno; -+ } -+ -+ iommu_group_path[len] = 0; -+ group_name = basename(iommu_group_path); -+ if (sscanf(group_name, "%d", &groupid) != 1) { -+ RTE_LOG(ERR, EAL, "\tvfio: error reading %s: %m\n", path); -+ return -errno; -+ } -+ -+ RTE_LOG(INFO, EAL, "\tvfio: iommu group id = %d\n", groupid); -+ -+ /* Check if group already exists */ -+ for (i = 0; i < VFIO_MAX_GRP; i++) { -+ group = &vfio_groups[i]; -+ if (group->groupid == groupid) { -+ RTE_LOG(ERR, EAL, "groupid already exists %d\n", groupid); -+ return 0; -+ } -+ } -+ -+ if (vfio_set_group(group, groupid)) { -+ RTE_LOG(ERR, EAL, "group setup failure - %d\n", groupid); -+ return -ENODEV; -+ } -+ -+ /* Get Device information */ -+ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, vfio_container); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "\tvfio: error getting device %s fd from group %d\n", -+ vfio_container, group->groupid); -+ return ret; -+ } -+ container_device_fd = ret; -+ RTE_LOG(INFO, EAL, "vfio: Container FD is [0x%X]\n", container_device_fd); -+ /* Set up SMMU */ -+ ret = setup_dmamap(); -+ if (ret) { -+ RTE_LOG(ERR, EAL, ": Setting dma map\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int64_t vfio_map_mcp_obj(struct fsl_vfio_group *group, char *mcp_obj) -+{ -+ int64_t v_addr = (int64_t)MAP_FAILED; -+ int32_t ret, mc_fd; -+ -+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; -+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; -+ -+ /* getting the mcp object's fd*/ -+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj); -+ if (mc_fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting device %s fd from group %d\n", -+ mcp_obj, group->fd); -+ return v_addr; -+ } -+ -+ /* getting device info*/ -+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting DEVICE_INFO\n"); -+ goto MC_FAILURE; -+ } -+ -+ /* getting device region info*/ -+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); -+ if (ret < 0) { -+ RTE_LOG(ERR, EAL, "vfio: error getting REGION_INFO\n"); -+ goto MC_FAILURE; -+ } -+ -+ RTE_LOG(INFO, EAL, "region offset = %llx , region size = %llx\n", -+ reg_info.offset, reg_info.size); -+ -+ v_addr = (uint64_t)mmap(NULL, reg_info.size, -+ PROT_WRITE | PROT_READ, MAP_SHARED, -+ mc_fd, reg_info.offset); -+ -+MC_FAILURE: -+ close(mc_fd); -+ -+ return v_addr; -+} -+ -+/* Following function shall fetch total available list of MC devices -+ * from VFIO container & populate private list of devices and other -+ * data structures -+ */ -+static int vfio_process_group_devices(void) -+{ -+ struct fsl_vfio_device *vdev; -+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; -+ char *temp_obj, *object_type, *mcp_obj, *dev_name; -+ int32_t object_id, i, dev_fd, ret; -+ DIR *d; -+ struct dirent *dir; -+ char path[PATH_MAX]; -+ int64_t v_addr; -+ int ndev_count; -+ struct fsl_vfio_group *group = &vfio_groups[0]; -+ -+ sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid); -+ -+ d = opendir(path); -+ if (!d) { -+ RTE_LOG(ERR, EAL, "Unable to open directory %s\n", path); -+ return -1; -+ } -+ -+ /*Counting the number of devices in a group and getting the mcp ID*/ -+ ndev_count = 0; -+ mcp_obj 
= NULL; -+ while ((dir = readdir(d)) != NULL) { -+ if (dir->d_type == DT_LNK) { -+ ndev_count++; -+ if (!strncmp("dpmcp", dir->d_name, 5)) { -+ if (mcp_obj) -+ free(mcp_obj); -+ mcp_obj = malloc(sizeof(dir->d_name)); -+ if (!mcp_obj) { -+ RTE_LOG(ERR, EAL, -+ "Unable to allocate memory\n"); -+ return -ENOMEM; -+ } -+ strcpy(mcp_obj, dir->d_name); -+ temp_obj = strtok(dir->d_name, "."); -+ temp_obj = strtok(NULL, "."); -+ sscanf(temp_obj, "%d", &mcp_id); -+ } -+ } -+ } -+ closedir(d); -+ -+ if (!mcp_obj) { -+ RTE_LOG(ERR, EAL, "MCP Object not Found\n"); -+ return -ENODEV; -+ } -+ RTE_LOG(INFO, EAL, "Total devices in conatiner = %d, MCP ID = %d\n", -+ ndev_count, mcp_id); -+ -+ /* Allocate the memory depends upon number of objects in a group*/ -+ group->vfio_device = (struct fsl_vfio_device *)malloc(ndev_count * sizeof(struct fsl_vfio_device)); -+ if (!(group->vfio_device)) { -+ RTE_LOG(ERR, EAL, "Unable to allocate memory\n"); -+ free(mcp_obj); -+ return -ENOMEM; -+ } -+ -+ /* Allocate memory for MC Portal list */ -+ mcp_ptr_list = malloc(sizeof(void *) * 1); -+ if (!mcp_ptr_list) { -+ RTE_LOG(ERR, EAL, "NO Memory!\n"); -+ free(mcp_obj); -+ goto FAILURE; -+ } -+ -+ v_addr = vfio_map_mcp_obj(group, mcp_obj); -+ free(mcp_obj); -+ if (v_addr == (int64_t)MAP_FAILED) { -+ RTE_LOG(ERR, EAL, "mapping region (errno = %d)\n", errno); -+ goto FAILURE; -+ } -+ -+ RTE_LOG(INFO, EAL, "MC has VIR_ADD = 0x%ld\n", v_addr); -+ -+ mcp_ptr_list[0] = (void *)v_addr; -+ -+ d = opendir(path); -+ if (!d) { -+ RTE_LOG(ERR, EAL, "Directory %s not able to open\n", path); -+ goto FAILURE; -+ } -+ -+ i = 0; -+ printf("\nDPAA2 - Parsing MC Device Objects:\n"); -+ /* Parsing each object and initiating them*/ -+ while ((dir = readdir(d)) != NULL) { -+ if (dir->d_type != DT_LNK) -+ continue; -+ if (!strncmp("dprc", dir->d_name, 4) || !strncmp("dpmcp", dir->d_name, 5)) -+ continue; -+ dev_name = malloc(sizeof(dir->d_name)); -+ if (!dev_name) { -+ RTE_LOG(ERR, EAL, "Unable to allocate memory\n"); -+ goto FAILURE; -+ } -+ strcpy(dev_name, dir->d_name); -+ object_type = strtok(dir->d_name, "."); -+ temp_obj = strtok(NULL, "."); -+ sscanf(temp_obj, "%d", &object_id); -+ RTE_LOG(INFO, EAL, "%s ", dev_name); -+ -+ /* getting the device fd*/ -+ dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name); -+ if (dev_fd < 0) { -+ RTE_LOG(ERR, EAL, "vfio getting device %s fd from group %d\n", -+ dev_name, group->fd); -+ free(dev_name); -+ goto FAILURE; -+ } -+ -+ free(dev_name); -+ vdev = &group->vfio_device[group->object_index++]; -+ vdev->fd = dev_fd; -+ vdev->index = i; -+ i++; -+ /* Get Device inofrmation */ -+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) { -+ RTE_LOG(ERR, EAL, "VFIO_DEVICE_FSL_MC_GET_INFO failed\n"); -+ goto FAILURE; -+ } -+ -+ if (!strcmp(object_type, "dpni") || -+ !strcmp(object_type, "dpseci")) { -+ struct rte_pci_device *dev; -+ -+ dev = malloc(sizeof(struct rte_pci_device)); -+ if (dev == NULL) { -+ return -1; -+ } -+ memset(dev, 0, sizeof(*dev)); -+ /* store hw_id of dpni/dpseci device */ -+ dev->addr.devid = object_id; -+ dev->id.vendor_id = FSL_VENDOR_ID; -+ dev->id.device_id = (strcmp(object_type, "dpseci")) ? 
-+ FSL_MC_DPNI_DEVID : FSL_MC_DPSECI_DEVID; -+ -+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next); -+ } -+ -+ if (!strcmp(object_type, "dpio")) { -+ dpaa2_create_dpio_device(vdev, &device_info, object_id); -+ } -+ -+ if (!strcmp(object_type, "dpbp")) { -+ dpaa2_create_dpbp_device(object_id); -+ } -+ } -+ closedir(d); -+ -+ ret = dpaa2_affine_qbman_swp(); -+ if (ret) -+ RTE_LOG(ERR, EAL, "%s(): Err in affining qbman swp\n", __func__); -+ -+ return 0; -+ -+FAILURE: -+ free(group->vfio_device); -+ group->vfio_device = NULL; -+ return -1; -+} -+ -+/* -+ * Scan the content of the PCI bus, and the devices in the devices -+ * list -+ */ -+static int -+fsl_mc_scan(void) -+{ -+ char path[PATH_MAX]; -+ struct stat st; -+ -+ ls2bus_container = getenv("DPRC"); -+ -+ if (ls2bus_container == NULL) { -+ RTE_LOG(WARNING, EAL, "vfio container not set in env DPRC\n"); -+ return -1; -+ } -+ -+ snprintf(path, sizeof(path), "%s/%s", SYSFS_FSL_MC_DEVICES, -+ ls2bus_container); -+ /* Check whether LS-Container exists or not */ -+ RTE_LOG(INFO, EAL, "\tcontainer device path = %s\n", path); -+ if (stat(path, &st) < 0) { -+ RTE_LOG(ERR, EAL, "vfio:fsl-mc device does not exists\n"); -+ return -1; -+ } -+ return 0; -+} -+ -+/* Init the FSL-MC- LS2 EAL subsystem */ -+int -+rte_eal_dpaa2_init(void) -+{ -+ if (fsl_mc_scan() < 0) -+ return -1; -+ -+#ifdef VFIO_PRESENT -+ if (setup_vfio_grp(ls2bus_container)) { -+ RTE_LOG(ERR, EAL, "setup_vfio_grp\n"); -+ return -1; -+ } -+ if (vfio_process_group_devices()) { -+ RTE_LOG(ERR, EAL, "vfio_process_group_devices\n"); -+ return -1; -+ } -+#endif -+ return 0; -+} -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h -new file mode 100644 -index 0000000..cf2bd38 ---- /dev/null -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h -@@ -0,0 +1,98 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Freescale Semiconductor nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef _EAL_VFIO_FSL_MC_H_ -+#define _EAL_VFIO_FSL_MC_H_ -+ -+#include -+#include -+#include "eal_vfio.h" -+ -+#define FSL_VENDOR_ID 0x1957 -+#define FSL_MC_DPNI_DEVID 7 -+#define FSL_MC_DPSECI_DEVID 3 -+ -+#define VFIO_MAX_GRP 1 -+#define VFIO_MAX_CONTAINERS 1 -+ -+#define DPAA2_MBUF_HW_ANNOTATION 64 -+#define DPAA2_FD_PTA_SIZE 64 -+ -+#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM -+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM" -+#endif -+ -+/* we will re-use the HEADROOM for annotation in RX */ -+#define DPAA2_HW_BUF_RESERVE 0 -+#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */ -+ -+typedef struct fsl_vfio_device { -+ int fd; /* fsl_mc root container device ?? */ -+ int index; /*index of child object */ -+ struct fsl_vfio_device *child; /* Child object */ -+} fsl_vfio_device; -+ -+typedef struct fsl_vfio_group { -+ int fd; /* /dev/vfio/"groupid" */ -+ int groupid; -+ struct fsl_vfio_container *container; -+ int object_index; -+ struct fsl_vfio_device *vfio_device; -+} fsl_vfio_group; -+ -+typedef struct fsl_vfio_container { -+ int fd; /* /dev/vfio/vfio */ -+ int used; -+ int index; /* index in group list */ -+ struct fsl_vfio_group *group_list[VFIO_MAX_GRP]; -+} fsl_vfio_container; -+ -+int vfio_dmamap_mem_region( -+ uint64_t vaddr, -+ uint64_t iova, -+ uint64_t size); -+ -+/* initialize the NXP/FSL dpaa2 accelerators */ -+int rte_eal_dpaa2_init(void); -+ -+int dpaa2_create_dpio_device(struct fsl_vfio_device *vdev, -+ struct vfio_device_info *obj_info, -+ int object_id); -+ -+int dpaa2_create_dpbp_device(int dpbp_id); -+ -+int dpaa2_affine_qbman_swp(void); -+ -+int dpaa2_affine_qbman_swp_sec(void); -+ -+#endif -+ -diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h -index 059ad9e..d620ad1 100644 ---- a/lib/librte_mempool/rte_mempool.h -+++ b/lib/librte_mempool/rte_mempool.h -@@ -262,6 +262,14 @@ struct rte_mempool { - #define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */ - #define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */ - -+#ifdef RTE_LIBRTE_DPAA2_PMD -+/* TODO: This should be removed once mempool integration is complete. Primary -+ * reason for this is identification of DPAA1/2 memory pool for forwarding -+ * case -+ */ -+#define MEMPOOL_F_HW_PKT_POOL 0x0080 -+#endif -+ - /** - * @internal When debug is enabled, store some statistics. - * -diff --git a/mk/rte.app.mk b/mk/rte.app.mk -index eb28e11..11ae122 100644 ---- a/mk/rte.app.mk -+++ b/mk/rte.app.mk -@@ -101,6 +101,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile - - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore -+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2 - - ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n) - # plugins (link only if static libraries) --- -2.5.0 - diff --git a/dpdk/dpdk-16.07_patches/0005-drivers-reset-packet_type-before-using-buffer.patch b/dpdk/dpdk-16.07_patches/0005-drivers-reset-packet_type-before-using-buffer.patch deleted file mode 100644 index d20ff28f..00000000 --- a/dpdk/dpdk-16.07_patches/0005-drivers-reset-packet_type-before-using-buffer.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 729a464f9a58fc77bf4e8f527a7848c6153e4b75 Mon Sep 17 00:00:00 2001 -From: Ray Kinsella -Date: Mon, 8 Aug 2016 19:41:59 +0100 -Subject: [PATCH] drivers: reset packet_type before using buffer - -Ensure the packet_type is reset before the buffer is used. 
This can cause packets to be mishandled in systems with more than one type of driver in use. - -Signed-off-by: Ray Kinsella -Signed-off-by: Todd Foggoa (tfoggoa) ---- - drivers/net/e1000/em_rxtx.c | 2 ++ - drivers/net/virtio/virtio_rxtx.c | 2 ++ - drivers/net/vmxnet3/vmxnet3_rxtx.c | 1 + - 3 files changed, 5 insertions(+) - -diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c -index 6d8750a..693dd9d 100644 ---- a/drivers/net/e1000/em_rxtx.c -+++ b/drivers/net/e1000/em_rxtx.c -@@ -784,6 +784,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - rxm->ol_flags = rx_desc_status_to_pkt_flags(status); - rxm->ol_flags = rxm->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -@@ -1010,6 +1011,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); - first_seg->ol_flags = first_seg->ol_flags | - rx_desc_error_to_pkt_flags(rxd.errors); -+ first_seg->packet_type = RTE_PTYPE_UNKNOWN; - - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index 724517e..f1a572d 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -677,6 +677,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxm->ol_flags = 0; - rxm->vlan_tci = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - rxm->nb_segs = 1; - rxm->next = NULL; -@@ -800,6 +801,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, - rxm->vlan_tci = 0; - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - rxm->port = rxvq->port_id; - rx_pkts[nb_rx] = rxm; -diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c -index 9deeb3f..ac11d82 100644 ---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c -+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c -@@ -686,6 +686,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxm->ol_flags = 0; - rxm->vlan_tci = 0; -+ rxm->packet_type = RTE_PTYPE_UNKNOWN; - - /* - * If this is the first buffer of the received packet, --- -1.9.1 - diff --git a/dpdk/dpdk-16.07_patches/0006-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-16.07_patches/0006-Allow-applications-to-override-rte_delay_us.patch deleted file mode 100644 index 8a32f600..00000000 --- a/dpdk/dpdk-16.07_patches/0006-Allow-applications-to-override-rte_delay_us.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 3432c140c9c51e671a4d58bb428d5852426add1f Mon Sep 17 00:00:00 2001 -From: "Todd Foggoa (tfoggoa)" -Date: Wed, 3 Feb 2016 08:35:27 -0800 -Subject: [PATCH 5/6] Allow applications to override rte_delay_us() - -Some applications may wish to define their own implentation of -usec delay other than the existing blocking one. The default -behavior remains unchanged. 
- -Signed-off-by: Todd Foggoa (tfoggoa) ---- - lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c -index c4227cd..cc26b91 100644 ---- a/lib/librte_eal/common/eal_common_timer.c -+++ b/lib/librte_eal/common/eal_common_timer.c -@@ -47,9 +47,21 @@ - /* The frequency of the RDTSC timer resolution */ - static uint64_t eal_tsc_resolution_hz; - -+/* Allow an override of the rte_delay_us function */ -+int rte_delay_us_override (unsigned us) __attribute__((weak)); -+ -+int -+rte_delay_us_override(__attribute__((unused)) unsigned us) -+{ -+ return 0; -+} -+ - void - rte_delay_us(unsigned us) - { -+ if (rte_delay_us_override(us)) -+ return; -+ - const uint64_t start = rte_get_timer_cycles(); - const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6; - while ((rte_get_timer_cycles() - start) < ticks) --- -2.7.4 - diff --git a/dpdk/dpdk-16.07_patches/0007-UIO-Fix-a-crash-in-igb_uio-driver-when-the-device-is.patch b/dpdk/dpdk-16.07_patches/0007-UIO-Fix-a-crash-in-igb_uio-driver-when-the-device-is.patch deleted file mode 100644 index 07e1c9c8..00000000 --- a/dpdk/dpdk-16.07_patches/0007-UIO-Fix-a-crash-in-igb_uio-driver-when-the-device-is.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 95c2d549d8d123aac37a372580122f1b043c6165 Mon Sep 17 00:00:00 2001 -From: Ray Kinsella -Date: Wed, 10 Aug 2016 11:59:07 +0100 -Subject: [PATCH] UIO: Fix a crash in igb_uio driver when the device is - removed. - -This crash happens because the device still has MSI configured, -the fix is to free the IRQ. - -Signed-off-by: Todd Foggoa (tfoggoa) -Signed-off-by: Ray Kinsella ---- - lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -index df41e45..69873e7 100644 ---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c -@@ -442,8 +442,15 @@ static void - igbuio_pci_remove(struct pci_dev *dev) - { - struct rte_uio_pci_dev *udev = pci_get_drvdata(dev); -+ struct uio_info *info = pci_get_drvdata(dev); - - sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); -+ -+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)){ -+ free_irq(info->irq, info->uio_dev); -+ info->irq = UIO_IRQ_NONE; -+ } -+ - uio_unregister_device(&udev->info); - igbuio_pci_release_iomem(&udev->info); - if (udev->mode == RTE_INTR_MODE_MSIX) --- -1.9.1 - diff --git a/dpdk/dpdk-16.07_patches/0008-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-16.07_patches/0008-Temporarily-disable-unthrottled-log-message.patch deleted file mode 100644 index b637993c..00000000 --- a/dpdk/dpdk-16.07_patches/0008-Temporarily-disable-unthrottled-log-message.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 454e25ed57c17ec18ee76ead4a75f9abdf579608 Mon Sep 17 00:00:00 2001 -From: Dave Barach -Date: Tue, 9 Feb 2016 10:22:39 -0500 -Subject: [PATCH 6/6] Temporarily disable unthrottled log message. 
- -Signed-off-by: Dave Barach ---- - lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -index 06b26a9..8d918a4 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c -+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c -@@ -709,6 +709,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) - if (errno == EINTR || errno == EWOULDBLOCK) - continue; - -+ /* $$$ disable to avoid filling /var/log */ -+ if (0) - RTE_LOG(ERR, EAL, "Error reading from file " - "descriptor %d: %s\n", - events[n].data.fd, --- -2.7.4 - diff --git a/dpdk/dpdk-16.07_patches/0009-enic-bad-L4-checksum-ptype-set-on-ICMP-packets.patch b/dpdk/dpdk-16.07_patches/0009-enic-bad-L4-checksum-ptype-set-on-ICMP-packets.patch deleted file mode 100644 index 71a9d9b5..00000000 --- a/dpdk/dpdk-16.07_patches/0009-enic-bad-L4-checksum-ptype-set-on-ICMP-packets.patch +++ /dev/null @@ -1,18 +0,0 @@ -diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c -index 50f0b28..ad59613 100644 ---- a/drivers/net/enic/enic_rxtx.c -+++ b/drivers/net/enic/enic_rxtx.c -@@ -212,9 +212,12 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) - /* checksum flags */ - if (!enic_cq_rx_desc_csum_not_calc(cqrd) && - (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { -+ uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK; -+ - if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) - pkt_flags |= PKT_RX_IP_CKSUM_BAD; -- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { -+ if (l4_flags == RTE_PTYPE_L4_UDP || -+ l4_flags == RTE_PTYPE_L4_TCP) { - if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) - pkt_flags |= PKT_RX_L4_CKSUM_BAD; - } diff --git a/dpdk/dpdk-16.07_patches/0010-virtio-enable-indirect-descriptors-feature.patch b/dpdk/dpdk-16.07_patches/0010-virtio-enable-indirect-descriptors-feature.patch deleted file mode 100644 index 80cd4bff..00000000 --- a/dpdk/dpdk-16.07_patches/0010-virtio-enable-indirect-descriptors-feature.patch +++ /dev/null @@ -1,34 +0,0 @@ -From be1210e77f0f9072ccb8e6970552596b6780a44c Mon Sep 17 00:00:00 2001 -From: Pierre Pfister -Date: Fri, 2 Sep 2016 16:24:57 +0200 -Subject: [PATCH] virtio: enable indirect descriptors feature - -Virtio indirect descriptors are supported by the data-path -but the feature bit is never set during feature negociation. - -This patch simply adds VIRTIO_RING_F_INDIRECT_DESC back to -the supported features bit mask, hence enabling the use of -indirect descriptors when the feature is negociated with the -device. 
- -Signed-off-by: Pierre Pfister ---- - drivers/net/virtio/virtio_ethdev.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h -index 2ecec6e..31c91a5 100644 ---- a/drivers/net/virtio/virtio_ethdev.h -+++ b/drivers/net/virtio/virtio_ethdev.h -@@ -63,6 +63,7 @@ - 1u << VIRTIO_NET_F_CTRL_RX | \ - 1u << VIRTIO_NET_F_CTRL_VLAN | \ - 1u << VIRTIO_NET_F_MRG_RXBUF | \ -+ 1u << VIRTIO_RING_F_INDIRECT_DESC | \ - 1ULL << VIRTIO_F_VERSION_1) - - /* --- -2.7.4 (Apple Git-66) - - diff --git a/src/vnet/devices/dpdk/dpdk.h b/src/vnet/devices/dpdk/dpdk.h index 1b54460e..79c694f7 100644 --- a/src/vnet/devices/dpdk/dpdk.h +++ b/src/vnet/devices/dpdk/dpdk.h @@ -66,7 +66,6 @@ extern vnet_device_class_t dpdk_device_class; extern vlib_node_registration_t dpdk_input_node; extern vlib_node_registration_t handoff_dispatch_node; -#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0) #define foreach_dpdk_pmd \ _ ("net_thunderx", THUNDERX) \ _ ("net_e1000_em", E1000EM) \ @@ -85,25 +84,6 @@ extern vlib_node_registration_t handoff_dispatch_node; _ ("net_cxgbe", CXGBE) \ _ ("net_mlx5", MLX5) \ _ ("net_dpaa2", DPAA2) -#else -#define foreach_dpdk_pmd \ - _ ("rte_nicvf_pmd", THUNDERX) \ - _ ("rte_em_pmd", E1000EM) \ - _ ("rte_igb_pmd", IGB) \ - _ ("rte_igbvf_pmd", IGBVF) \ - _ ("rte_ixgbe_pmd", IXGBE) \ - _ ("rte_ixgbevf_pmd", IXGBEVF) \ - _ ("rte_i40e_pmd", I40E) \ - _ ("rte_i40evf_pmd", I40EVF) \ - _ ("rte_virtio_pmd", VIRTIO) \ - _ ("rte_enic_pmd", ENIC) \ - _ ("rte_vmxnet3_pmd", VMXNET3) \ - _ ("AF_PACKET PMD", AF_PACKET) \ - _ ("rte_bond_pmd", BOND) \ - _ ("rte_pmd_fm10k", FM10K) \ - _ ("rte_cxgbe_pmd", CXGBE) \ - _ ("rte_dpaa2_dpni", DPAA2) -#endif typedef enum { diff --git a/src/vnet/devices/dpdk/format.c b/src/vnet/devices/dpdk/format.c index cc0d71af..1558630c 100644 --- a/src/vnet/devices/dpdk/format.c +++ b/src/vnet/devices/dpdk/format.c @@ -79,12 +79,6 @@ _(DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, "outer-ipv4-cksum") \ _(DEV_TX_OFFLOAD_QINQ_INSERT, "qinq-insert") -#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0) -/* New ol_flags bits added in DPDK-16.11 */ -#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7) -#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8) -#endif - #define foreach_dpdk_pkt_rx_offload_flag \ _ (PKT_RX_VLAN_PKT, "RX packet is a 802.1q VLAN packet") \ _ (PKT_RX_RSS_HASH, "RX packet with RSS hash result") \ @@ -98,12 +92,6 @@ _ (PKT_RX_IEEE1588_TMST, "RX IEEE1588 L2/L4 timestamped packet") \ _ (PKT_RX_QINQ_STRIPPED, "RX packet QinQ tags stripped") -#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0) -/* PTYPE added in DPDK-16.11 */ -#define RTE_PTYPE_L2_ETHER_VLAN 0x00000006 -#define RTE_PTYPE_L2_ETHER_QINQ 0x00000007 -#endif - #define foreach_dpdk_pkt_type \ _ (L2, ETHER, "Ethernet packet") \ _ (L2, ETHER_TIMESYNC, "Ethernet packet for time sync") \ diff --git a/src/vnet/devices/dpdk/init.c b/src/vnet/devices/dpdk/init.c index ec008c20..f4700133 100755 --- a/src/vnet/devices/dpdk/init.c +++ b/src/vnet/devices/dpdk/init.c @@ -428,11 +428,8 @@ dpdk_lib_init (dpdk_main_t * dm) /* workaround for drivers not setting driver_name */ if ((!dev_info.driver_name) && (dev_info.pci_dev)) -#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0) - dev_info.driver_name = dev_info.pci_dev->driver->name; -#else dev_info.driver_name = dev_info.pci_dev->driver->driver.name; -#endif + ASSERT (dev_info.driver_name); if (!xd->pmd) @@ -1608,11 +1605,8 @@ dpdk_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) struct rte_eth_dev_info dev_info; rte_eth_dev_info_get (i, 
&dev_info); if (!dev_info.driver_name) -#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0) - dev_info.driver_name = dev_info.pci_dev->driver->name; -#else dev_info.driver_name = dev_info.pci_dev->driver->driver.name; -#endif + ASSERT (dev_info.driver_name); if (strncmp (dev_info.driver_name, "rte_bond_pmd", 12) == 0) { diff --git a/src/vnet/devices/dpdk/main.c b/src/vnet/devices/dpdk/main.c index 1e6ec2f8..9ea3aa04 100644 --- a/src/vnet/devices/dpdk/main.c +++ b/src/vnet/devices/dpdk/main.c @@ -61,14 +61,12 @@ rte_delay_us_override (unsigned us) return 0; // no override } -#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0) static void rte_delay_us_override_cb (unsigned us) { if (rte_delay_us_override (us) == 0) rte_delay_us_block (us); } -#endif static clib_error_t * dpdk_main_init (vlib_main_t * vm) { @@ -77,12 +75,9 @@ static clib_error_t * dpdk_main_init (vlib_main_t * vm) if ((error = vlib_call_init_function (vm, dpdk_init))) return error; -#if DPDK -#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0) /* register custom delay function */ rte_delay_us_callback_register (rte_delay_us_override_cb); -#endif -#endif + return error; } diff --git a/src/vnet/devices/dpdk/node.c b/src/vnet/devices/dpdk/node.c index e541cdbc..bde9dfae 100644 --- a/src/vnet/devices/dpdk/node.c +++ b/src/vnet/devices/dpdk/node.c @@ -55,11 +55,6 @@ vlib_buffer_is_mpls (vlib_buffer_t * b) return (h->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST)); } -#if RTE_VERSION < RTE_VERSION_NUM(16, 11, 0, 0) -/* New ol_flags bits added in DPDK-16.11 */ -#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7) -#endif - always_inline u32 dpdk_rx_next_from_etype (struct rte_mbuf * mb, vlib_buffer_t * b0) { @@ -79,15 +74,8 @@ dpdk_rx_next_from_etype (struct rte_mbuf * mb, vlib_buffer_t * b0) always_inline int dpdk_mbuf_is_vlan (struct rte_mbuf *mb) { -#if RTE_VERSION >= RTE_VERSION_NUM(16, 11, 0, 0) return (mb->packet_type & RTE_PTYPE_L2_ETHER_VLAN) == RTE_PTYPE_L2_ETHER_VLAN; -#else - return - (mb->ol_flags & - (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ_STRIPPED)) == - PKT_RX_VLAN_PKT; -#endif } always_inline int -- cgit 1.2.3-korg From fa80f2e421e7e85f429437aad2da9971a2a60a24 Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Tue, 7 Mar 2017 14:39:29 +0000 Subject: dpdk: fix plugin linking with sw crypto libraries Change-Id: I3e3bf786ab3c7672ff2cc7acd221421072e3ac8b Signed-off-by: Sergio Gonzalez Monroy --- dpdk/Makefile | 45 ++++++++++++++++++++++++++------------------- src/plugins/dpdk.am | 7 ++++++- 2 files changed, 32 insertions(+), 20 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 8e187cc2..fc93f9c6 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -25,7 +25,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.02 -PKG_SUFFIX ?= vpp1 +PKG_SUFFIX ?= vpp2 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -34,9 +34,9 @@ DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifeq ($(DPDK_CRYPTO_SW_PMD),y) -AESNIMB_LIB_TARBALL := v0.44.tar.gz +AESNIMB_LIB_TARBALL := v0.44-gcm.2.tar.gz AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) -AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44 +AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44-gcm.2 ISA_L_CRYPTO_LIB_TARBALL := isa_l_crypto.tar.gz ISA_L_CRYPTO_LIB_TARBALL_URL := 
http://github.com/01org/isa-l_crypto/archive/master.tar.gz ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-master @@ -87,8 +87,8 @@ DPDK_EXTRA_CFLAGS := -g -O0 endif ifeq ($(DPDK_CRYPTO_SW_PMD),y) -DPDK_EXTRA_CFLAGS += -I$(ISA_L_CRYPTO_LIB_SOURCE) -DPDK_EXTRA_LDFLAGS += -L$(ISA_L_CRYPTO_LIB_SOURCE)/.libs +DPDK_EXTRA_CFLAGS += -I$(I)/include +DPDK_EXTRA_LDFLAGS += -L$(I)/lib DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) endif @@ -158,19 +158,27 @@ $(CURDIR)/$(DPDK_TARBALL): then cp $(DPDK_DOWNLOAD_DIR)/$(DPDK_TARBALL) $(CURDIR) ; \ else curl -o $(CURDIR)/$(DPDK_TARBALL) -LO $(DPDK_TAR_URL) ; \ fi -ifeq ($(DPDK_CRYPTO_SW_PMD),y) + @rm -f $(B)/.download.ok + +$(CURDIR)/$(AESNIMB_LIB_TARBALL): @if [ -e $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) ] ; \ then cp $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) $(CURDIR) ; \ - else curl -o $(CURDIR)/$(AESNIMB_LIB_TARBALL) -LO $(AESNIMB_LIB_TARBALL_URL) ; \ + else curl -o $@ -LO $(AESNIMB_LIB_TARBALL_URL) ; \ fi + +$(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): @if [ -e $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) ] ; \ then cp $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) $(CURDIR) ; \ - else curl -o $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) -LO $(ISA_L_CRYPTO_LIB_TARBALL_URL) ; \ + else curl -o $@ -LO $(ISA_L_CRYPTO_LIB_TARBALL_URL) ; \ fi + +DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) +ifeq ($(DPDK_CRYPTO_SW_PMD),y) +DPDK_DOWNLOADS += $(CURDIR)/$(AESNIMB_LIB_TARBALL) +DPDK_DOWNLOADS += $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) endif - @rm -f $(B)/.download.ok -$(B)/.download.ok: $(CURDIR)/$(DPDK_TARBALL) +$(B)/.download.ok: $(DPDK_DOWNLOADS) @mkdir -p $(B) @openssl md5 $< | cut -f 2 -d " " - > $(B)/$(DPDK_TARBALL).md5sum @([ "$$(<$(B)/$(DPDK_TARBALL).md5sum)" = "$(DPDK_$(DPDK_VERSION)_TARBALL_MD5_CKSUM)" ] || \ @@ -209,12 +217,6 @@ endif patch: $(B)/.patch.ok $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config -ifeq ($(DPDK_CRYPTO_SW_PMD),y) - @make -C $(AESNIMB_LIB_SOURCE) - @cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure - @make -C $(ISA_L_CRYPTO_LIB_SOURCE) - @cp $(ISA_L_CRYPTO_LIB_SOURCE)/include $(ISA_L_CRYPTO_LIB_SOURCE)/isa-l_crypto -r -endif @make $(DPDK_MAKE_ARGS) config @touch $@ @@ -223,11 +225,16 @@ config: $(B)/.config.ok $(B)/.build.ok: $(DPDK_SOURCE_FILES) @if [ ! 
-e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi - @make $(DPDK_MAKE_ARGS) install ifeq ($(DPDK_CRYPTO_SW_PMD),y) - @cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ - @cp $(ISA_L_CRYPTO_LIB_SOURCE)/.libs/libisal_crypto.a $(I)/lib/ + # Build IPsec_MB library + mkdir -p $(I)/lib/ + make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y + cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ + # Build ISA-L Crypto library + cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure --prefix=$(I) + make -C $(ISA_L_CRYPTO_LIB_SOURCE) -j install endif + @make $(DPDK_MAKE_ARGS) install @touch $@ .PHONY: build diff --git a/src/plugins/dpdk.am b/src/plugins/dpdk.am index 01383de6..b857429d 100644 --- a/src/plugins/dpdk.am +++ b/src/plugins/dpdk.am @@ -14,7 +14,12 @@ vppapitestplugins_LTLIBRARIES += dpdk_test_plugin.la vppplugins_LTLIBRARIES += dpdk_plugin.la -dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive,-lm,-ldl +dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive +if WITH_DPDK_CRYPTO_SW +dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a +dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libisal_crypto.a,-l:libisal_crypto.a +endif +dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl dpdk_plugin_la_SOURCES = \ dpdk/main.c \ -- cgit 1.2.3-korg From 0f60ff8af3dd72bb1fa8f13886a80d110d78c7b0 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Thu, 30 Mar 2017 17:58:42 +0200 Subject: dpdk: add support for Mellanox ConnectX-5 devices Change-Id: I3ed2834a326eac50a7cb4faa592f42fd06325d5a Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- ...t-of-buffer-counter-to-extended-statistic.patch | 135 ++++ ...t-mlx5-remove-unused-interface-name-query.patch | 22 + ...mlx5-fix-extended-statistics-wrong-number.patch | 87 +++ ...tended-statistics-counters-identification.patch | 13 + ...5-fix-startup-when-flow-cannot-be-applied.patch | 57 ++ .../0006-net-mlx5-add-hardware-TSO-support.patch | 385 ++++++++++ ...d-hardware-checksum-offload-for-tunnel-pa.patch | 194 +++++ ...d-enhanced-multi-packet-send-for-ConnectX.patch | 809 +++++++++++++++++++++ src/plugins/dpdk/device/init.c | 5 +- 10 files changed, 1707 insertions(+), 2 deletions(-) create mode 100644 dpdk/dpdk-17.02_patches/0001-dpdk-dev-net-mlx5-add-out-of-buffer-counter-to-extended-statistic.patch create mode 100644 dpdk/dpdk-17.02_patches/0002-dpdk-dev-1-2-net-mlx5-remove-unused-interface-name-query.patch create mode 100644 dpdk/dpdk-17.02_patches/0003-dpdk-dev-2-2-net-mlx5-fix-extended-statistics-wrong-number.patch create mode 100644 dpdk/dpdk-17.02_patches/0004-dpdk-dev-net-mlx5-fix-extended-statistics-counters-identification.patch create mode 100644 dpdk/dpdk-17.02_patches/0005-net-mlx5-fix-startup-when-flow-cannot-be-applied.patch create mode 100644 dpdk/dpdk-17.02_patches/0006-net-mlx5-add-hardware-TSO-support.patch create mode 100644 dpdk/dpdk-17.02_patches/0007-add-hardware-checksum-offload-for-tunnel-pa.patch create mode 100644 dpdk/dpdk-17.02_patches/0008-net-mlx5-add-enhanced-multi-packet-send-for-ConnectX.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index fc93f9c6..c46ef0f1 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -25,7 +25,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.02 -PKG_SUFFIX ?= vpp2 +PKG_SUFFIX ?= vpp3 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) diff --git 
a/dpdk/dpdk-17.02_patches/0001-dpdk-dev-net-mlx5-add-out-of-buffer-counter-to-extended-statistic.patch b/dpdk/dpdk-17.02_patches/0001-dpdk-dev-net-mlx5-add-out-of-buffer-counter-to-extended-statistic.patch new file mode 100644 index 00000000..3ebf5e8a --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0001-dpdk-dev-net-mlx5-add-out-of-buffer-counter-to-extended-statistic.patch @@ -0,0 +1,135 @@ +diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h +index 879da5e..2b4345a 100644 +--- a/drivers/net/mlx5/mlx5.h ++++ b/drivers/net/mlx5/mlx5.h +@@ -197,6 +197,8 @@ struct mlx5_secondary_data { + int mlx5_is_secondary(void); + int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); + int priv_ifreq(const struct priv *, int req, struct ifreq *); ++int priv_is_ib_cntr(const char *); ++int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *); + int priv_get_num_vfs(struct priv *, uint16_t *); + int priv_get_mtu(struct priv *, uint16_t *); + int priv_set_flags(struct priv *, unsigned int, unsigned int); +diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c +index 2145965..6b64f44 100644 +--- a/drivers/net/mlx5/mlx5_ethdev.c ++++ b/drivers/net/mlx5/mlx5_ethdev.c +@@ -234,6 +234,23 @@ struct priv * + } + + /** ++ * Check if the counter is located on ib counters file. ++ * ++ * @param[in] cntr ++ * Counter name. ++ * ++ * @return ++ * 1 if counter is located on ib counters file , 0 otherwise. ++ */ ++int ++priv_is_ib_cntr(const char *cntr) ++{ ++ if (!strcmp(cntr, "out_of_buffer")) ++ return 1; ++ return 0; ++} ++ ++/** + * Read from sysfs entry. + * + * @param[in] priv +@@ -260,10 +277,15 @@ struct priv * + if (priv_get_ifname(priv, &ifname)) + return -1; + +- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, +- ifname, entry); +- +- file = fopen(path, "rb"); ++ if (priv_is_ib_cntr(entry)) { ++ MKSTR(path, "%s/ports/1/hw_counters/%s", ++ priv->ctx->device->ibdev_path, entry); ++ file = fopen(path, "rb"); ++ } else { ++ MKSTR(path, "%s/device/net/%s/%s", ++ priv->ctx->device->ibdev_path, ifname, entry); ++ file = fopen(path, "rb"); ++ } + if (file == NULL) + return -1; + ret = fread(buf, 1, size, file); +@@ -469,6 +491,30 @@ struct priv * + } + + /** ++ * Read device counter from sysfs. ++ * ++ * @param priv ++ * Pointer to private structure. ++ * @param name ++ * Counter name. ++ * @param[out] cntr ++ * Counter output buffer. ++ * ++ * @return ++ * 0 on success, -1 on failure and errno is set. ++ */ ++int ++priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr) ++{ ++ unsigned long ulong_ctr; ++ ++ if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1) ++ return -1; ++ *cntr = ulong_ctr; ++ return 0; ++} ++ ++/** + * Set device MTU. 
+ * + * @param priv +diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c +index 20c957e..a48ebea 100644 +--- a/drivers/net/mlx5/mlx5_stats.c ++++ b/drivers/net/mlx5/mlx5_stats.c +@@ -125,6 +125,10 @@ struct mlx5_counter_ctrl { + .dpdk_name = "tx_errors_phy", + .ctr_name = "tx_errors_phy", + }, ++ { ++ .dpdk_name = "rx_out_of_buffer", ++ .ctr_name = "out_of_buffer", ++ }, + }; + + static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); +@@ -159,9 +163,15 @@ struct mlx5_counter_ctrl { + WARN("unable to read statistic values from device"); + return -1; + } +- for (i = 0; i != xstats_n; ++i) +- stats[i] = (uint64_t) +- et_stats->data[xstats_ctrl->dev_table_idx[i]]; ++ for (i = 0; i != xstats_n; ++i) { ++ if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) ++ priv_get_cntr_sysfs(priv, ++ mlx5_counters_init[i].ctr_name, ++ &stats[i]); ++ else ++ stats[i] = (uint64_t) ++ et_stats->data[xstats_ctrl->dev_table_idx[i]]; ++ } + return 0; + } + +@@ -233,6 +243,8 @@ struct mlx5_counter_ctrl { + } + } + for (j = 0; j != xstats_n; ++j) { ++ if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) ++ continue; + if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) { + WARN("counter \"%s\" is not recognized", + mlx5_counters_init[j].dpdk_name); diff --git a/dpdk/dpdk-17.02_patches/0002-dpdk-dev-1-2-net-mlx5-remove-unused-interface-name-query.patch b/dpdk/dpdk-17.02_patches/0002-dpdk-dev-1-2-net-mlx5-remove-unused-interface-name-query.patch new file mode 100644 index 00000000..aa03639b --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0002-dpdk-dev-1-2-net-mlx5-remove-unused-interface-name-query.patch @@ -0,0 +1,22 @@ +diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c +index 20c957e..0c80e4f 100644 +--- a/drivers/net/mlx5/mlx5_stats.c ++++ b/drivers/net/mlx5/mlx5_stats.c +@@ -177,17 +177,12 @@ struct mlx5_counter_ctrl { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + unsigned int j; +- char ifname[IF_NAMESIZE]; + struct ifreq ifr; + struct ethtool_drvinfo drvinfo; + struct ethtool_gstrings *strings = NULL; + unsigned int dev_stats_n; + unsigned int str_sz; + +- if (priv_get_ifname(priv, &ifname)) { +- WARN("unable to get interface name"); +- return; +- } + /* How many statistics are available. */ + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; diff --git a/dpdk/dpdk-17.02_patches/0003-dpdk-dev-2-2-net-mlx5-fix-extended-statistics-wrong-number.patch b/dpdk/dpdk-17.02_patches/0003-dpdk-dev-2-2-net-mlx5-fix-extended-statistics-wrong-number.patch new file mode 100644 index 00000000..05c2e8df --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0003-dpdk-dev-2-2-net-mlx5-fix-extended-statistics-wrong-number.patch @@ -0,0 +1,87 @@ +diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c +index 0c80e4f..60ffbaa 100644 +--- a/drivers/net/mlx5/mlx5_stats.c ++++ b/drivers/net/mlx5/mlx5_stats.c +@@ -166,6 +166,29 @@ struct mlx5_counter_ctrl { + } + + /** ++ * Query the number of statistics provided by ETHTOOL. ++ * ++ * @param priv ++ * Pointer to private structure. ++ * ++ * @return ++ * Number of statistics on success, -1 on error. 
++ */ ++static int ++priv_ethtool_get_stats_n(struct priv *priv) { ++ struct ethtool_drvinfo drvinfo; ++ struct ifreq ifr; ++ ++ drvinfo.cmd = ETHTOOL_GDRVINFO; ++ ifr.ifr_data = (caddr_t)&drvinfo; ++ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { ++ WARN("unable to query number of statistics"); ++ return -1; ++ } ++ return drvinfo.n_stats; ++} ++ ++/** + * Init the structures to read device counters. + * + * @param priv +@@ -178,19 +201,11 @@ struct mlx5_counter_ctrl { + unsigned int i; + unsigned int j; + struct ifreq ifr; +- struct ethtool_drvinfo drvinfo; + struct ethtool_gstrings *strings = NULL; + unsigned int dev_stats_n; + unsigned int str_sz; + +- /* How many statistics are available. */ +- drvinfo.cmd = ETHTOOL_GDRVINFO; +- ifr.ifr_data = (caddr_t)&drvinfo; +- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { +- WARN("unable to get driver info"); +- return; +- } +- dev_stats_n = drvinfo.n_stats; ++ dev_stats_n = priv_ethtool_get_stats_n(priv); + if (dev_stats_n < 1) { + WARN("no extended statistics available"); + return; +@@ -410,7 +425,15 @@ struct mlx5_counter_ctrl { + int ret = xstats_n; + + if (n >= xstats_n && stats) { ++ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ int stats_n; ++ + priv_lock(priv); ++ stats_n = priv_ethtool_get_stats_n(priv); ++ if (stats_n < 0) ++ return -1; ++ if (xstats_ctrl->stats_n != stats_n) ++ priv_xstats_init(priv); + ret = priv_xstats_get(priv, stats); + priv_unlock(priv); + } +@@ -427,8 +450,15 @@ struct mlx5_counter_ctrl { + mlx5_xstats_reset(struct rte_eth_dev *dev) + { + struct priv *priv = mlx5_get_priv(dev); ++ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ int stats_n; + + priv_lock(priv); ++ stats_n = priv_ethtool_get_stats_n(priv); ++ if (stats_n < 0) ++ return; ++ if (xstats_ctrl->stats_n != stats_n) ++ priv_xstats_init(priv); + priv_xstats_reset(priv); + priv_unlock(priv); + } diff --git a/dpdk/dpdk-17.02_patches/0004-dpdk-dev-net-mlx5-fix-extended-statistics-counters-identification.patch b/dpdk/dpdk-17.02_patches/0004-dpdk-dev-net-mlx5-fix-extended-statistics-counters-identification.patch new file mode 100644 index 00000000..8c066ad3 --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0004-dpdk-dev-net-mlx5-fix-extended-statistics-counters-identification.patch @@ -0,0 +1,13 @@ +diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c +index 1953293..703f48c 100644 +--- a/drivers/net/mlx5/mlx5_stats.c ++++ b/drivers/net/mlx5/mlx5_stats.c +@@ -253,7 +253,7 @@ struct mlx5_counter_ctrl { + } + } + for (j = 0; j != xstats_n; ++j) { +- if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) ++ if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name)) + continue; + if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) { + WARN("counter \"%s\" is not recognized", diff --git a/dpdk/dpdk-17.02_patches/0005-net-mlx5-fix-startup-when-flow-cannot-be-applied.patch b/dpdk/dpdk-17.02_patches/0005-net-mlx5-fix-startup-when-flow-cannot-be-applied.patch new file mode 100644 index 00000000..af928bb2 --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0005-net-mlx5-fix-startup-when-flow-cannot-be-applied.patch @@ -0,0 +1,57 @@ +From 0866d640e42d6c54b2b3f15ebde9930e756ba4d5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?N=C3=A9lio=20Laranjeiro?= +Date: Wed, 22 Feb 2017 10:57:52 +0100 +Subject: [PATCH] net/mlx5: fix startup when flow cannot be applied + +When flows cannot be re-applied due to configuration modifications, the +start function should rollback the configuration done. 
+ +Fixes: 2097d0d1e2cc ("net/mlx5: support basic flow items and actions") +Cc: stable@dpdk.org + +Signed-off-by: Nelio Laranjeiro +--- + drivers/net/mlx5/mlx5_trigger.c | 21 ++++++++++++++++----- + 1 file changed, 16 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c +index 30addd2..0acbf28 100644 +--- a/drivers/net/mlx5/mlx5_trigger.c ++++ b/drivers/net/mlx5/mlx5_trigger.c +@@ -82,17 +82,28 @@ mlx5_dev_start(struct rte_eth_dev *dev) + ERROR("%p: an error occurred while configuring hash RX queues:" + " %s", + (void *)priv, strerror(err)); +- /* Rollback. */ +- priv_special_flow_disable_all(priv); +- priv_mac_addrs_disable(priv); +- priv_destroy_hash_rxqs(priv); ++ goto error; + } + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) + priv_fdir_enable(priv); +- priv_dev_interrupt_handler_install(priv, dev); + err = priv_flow_start(priv); ++ if (err) { ++ priv->started = 0; ++ ERROR("%p: an error occurred while configuring flows:" ++ " %s", ++ (void *)priv, strerror(err)); ++ goto error; ++ } ++ priv_dev_interrupt_handler_install(priv, dev); + priv_xstats_init(priv); + priv_unlock(priv); ++ return 0; ++error: ++ /* Rollback. */ ++ priv_special_flow_disable_all(priv); ++ priv_mac_addrs_disable(priv); ++ priv_destroy_hash_rxqs(priv); ++ priv_flow_stop(priv); + return -err; + } + +-- +2.7.4 + diff --git a/dpdk/dpdk-17.02_patches/0006-net-mlx5-add-hardware-TSO-support.patch b/dpdk/dpdk-17.02_patches/0006-net-mlx5-add-hardware-TSO-support.patch new file mode 100644 index 00000000..929a6132 --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0006-net-mlx5-add-hardware-TSO-support.patch @@ -0,0 +1,385 @@ +From e25bad4a287924d26627ffe307f8a12824b87054 Mon Sep 17 00:00:00 2001 +From: Shahaf Shuler +Date: Thu, 2 Mar 2017 11:01:31 +0200 +Subject: [PATCH] net/mlx5: add hardware TSO support + +Implement support for hardware TSO. + +Signed-off-by: Shahaf Shuler +Acked-by: Nelio Laranjeiro +--- + doc/guides/nics/features/mlx5.ini | 1 + + doc/guides/nics/mlx5.rst | 12 ++++ + drivers/net/mlx5/mlx5.c | 18 ++++++ + drivers/net/mlx5/mlx5.h | 2 + + drivers/net/mlx5/mlx5_defs.h | 3 + + drivers/net/mlx5/mlx5_ethdev.c | 2 + + drivers/net/mlx5/mlx5_rxtx.c | 123 +++++++++++++++++++++++++++++++++----- + drivers/net/mlx5/mlx5_rxtx.h | 2 + + drivers/net/mlx5/mlx5_txq.c | 13 ++++ + 9 files changed, 160 insertions(+), 16 deletions(-) + +diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini +index f20d214..8df25ce 100644 +--- a/doc/guides/nics/features/mlx5.ini ++++ b/doc/guides/nics/features/mlx5.ini +@@ -11,6 +11,7 @@ Queue start/stop = Y + MTU update = Y + Jumbo frame = Y + Scattered Rx = Y ++TSO = Y + Promiscuous mode = Y + Allmulticast mode = Y + Unicast MAC filter = Y +diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst +index 5f6e594..9b0ba29 100644 +--- a/doc/guides/nics/mlx5.rst ++++ b/doc/guides/nics/mlx5.rst +@@ -90,6 +90,7 @@ Features + - Secondary process TX is supported. + - KVM and VMware ESX SR-IOV modes are supported. + - RSS hash result is supported. ++- Hardware TSO. + + Limitations + ----------- +@@ -186,9 +187,20 @@ Run-time configuration + save PCI bandwidth and improve performance at the cost of a slightly + higher CPU usage. + ++ This option cannot be used in conjunction with ``tso`` below. When ``tso`` ++ is set, ``txq_mpw_en`` is disabled. ++ + It is currently only supported on the ConnectX-4 Lx and ConnectX-5 + families of adapters. Enabled by default. 
+ ++- ``tso`` parameter [int] ++ ++ A nonzero value enables hardware TSO. ++ When hardware TSO is enabled, packets marked with TCP segmentation ++ offload will be divided into segments by the hardware. ++ ++ Disabled by default. ++ + Prerequisites + ------------- + +diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c +index d4bd469..03ed3b3 100644 +--- a/drivers/net/mlx5/mlx5.c ++++ b/drivers/net/mlx5/mlx5.c +@@ -84,6 +84,9 @@ + /* Device parameter to enable multi-packet send WQEs. */ + #define MLX5_TXQ_MPW_EN "txq_mpw_en" + ++/* Device parameter to enable hardware TSO offload. */ ++#define MLX5_TSO "tso" ++ + /** + * Retrieve integer value from environment variable. + * +@@ -290,6 +293,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) + priv->txqs_inline = tmp; + } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { + priv->mps &= !!tmp; /* Enable MPW only if HW supports */ ++ } else if (strcmp(MLX5_TSO, key) == 0) { ++ priv->tso = !!tmp; + } else { + WARN("%s: unknown parameter", key); + return -EINVAL; +@@ -316,6 +321,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs) + MLX5_TXQ_INLINE, + MLX5_TXQS_MIN_INLINE, + MLX5_TXQ_MPW_EN, ++ MLX5_TSO, + NULL, + }; + struct rte_kvargs *kvlist; +@@ -479,6 +485,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + IBV_EXP_DEVICE_ATTR_RX_HASH | + IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS | + IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN | ++ IBV_EXP_DEVICE_ATTR_TSO_CAPS | + 0; + + DEBUG("using port %u (%08" PRIx32 ")", port, test); +@@ -580,11 +587,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + + priv_get_num_vfs(priv, &num_vfs); + priv->sriov = (num_vfs || sriov); ++ priv->tso = ((priv->tso) && ++ (exp_device_attr.tso_caps.max_tso > 0) && ++ (exp_device_attr.tso_caps.supported_qpts & ++ (1 << IBV_QPT_RAW_ETH))); ++ if (priv->tso) ++ priv->max_tso_payload_sz = ++ exp_device_attr.tso_caps.max_tso; + if (priv->mps && !mps) { + ERROR("multi-packet send not supported on this device" + " (" MLX5_TXQ_MPW_EN ")"); + err = ENOTSUP; + goto port_error; ++ } else if (priv->mps && priv->tso) { ++ WARN("multi-packet send not supported in conjunction " ++ "with TSO. MPS disabled"); ++ priv->mps = 0; + } + /* Allocate and register default RSS hash keys. */ + priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, +diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h +index 4c4b9d4..93f129b 100644 +--- a/drivers/net/mlx5/mlx5.h ++++ b/drivers/net/mlx5/mlx5.h +@@ -126,6 +126,8 @@ struct priv { + unsigned int mps:1; /* Whether multi-packet send is supported. */ + unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */ + unsigned int pending_alarm:1; /* An alarm is pending. */ ++ unsigned int tso:1; /* Whether TSO is supported. */ ++ unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */ + unsigned int txq_inline; /* Maximum packet size for inlining. */ + unsigned int txqs_inline; /* Queue number threshold for inlining. */ + /* RX/TX queues. */ +diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h +index e91d245..eecb908 100644 +--- a/drivers/net/mlx5/mlx5_defs.h ++++ b/drivers/net/mlx5/mlx5_defs.h +@@ -79,4 +79,7 @@ + /* Maximum number of extended statistics counters. */ + #define MLX5_MAX_XSTATS 32 + ++/* Maximum Packet headers size (L2+L3+L4) for TSO. 
*/ ++#define MLX5_MAX_TSO_HEADER 128 ++ + #endif /* RTE_PMD_MLX5_DEFS_H_ */ +diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c +index 5677f03..5542193 100644 +--- a/drivers/net/mlx5/mlx5_ethdev.c ++++ b/drivers/net/mlx5/mlx5_ethdev.c +@@ -693,6 +693,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); ++ if (priv->tso) ++ info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + if (priv_get_ifname(priv, &ifname) == 0) + info->if_index = if_nametoindex(ifname); + /* FIXME: RETA update/query API expects the callee to know the size of +diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c +index 4d5455b..98889f6 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.c ++++ b/drivers/net/mlx5/mlx5_rxtx.c +@@ -365,6 +365,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + const unsigned int elts_n = 1 << txq->elts_n; + unsigned int i = 0; + unsigned int j = 0; ++ unsigned int k = 0; + unsigned int max; + uint16_t max_wqe; + unsigned int comp; +@@ -392,8 +393,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + uintptr_t addr; + uint64_t naddr; + uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2; ++ uint16_t tso_header_sz = 0; + uint16_t ehdr; + uint8_t cs_flags = 0; ++ uint64_t tso = 0; + #ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t total_length = 0; + #endif +@@ -465,14 +468,74 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + length -= pkt_inline_sz; + addr += pkt_inline_sz; + } ++ if (txq->tso_en) { ++ tso = buf->ol_flags & PKT_TX_TCP_SEG; ++ if (tso) { ++ uintptr_t end = (uintptr_t) ++ (((uintptr_t)txq->wqes) + ++ (1 << txq->wqe_n) * ++ MLX5_WQE_SIZE); ++ unsigned int copy_b; ++ uint8_t vlan_sz = (buf->ol_flags & ++ PKT_TX_VLAN_PKT) ? 4 : 0; ++ ++ tso_header_sz = buf->l2_len + vlan_sz + ++ buf->l3_len + buf->l4_len; ++ ++ if (unlikely(tso_header_sz > ++ MLX5_MAX_TSO_HEADER)) ++ break; ++ copy_b = tso_header_sz - pkt_inline_sz; ++ /* First seg must contain all headers. */ ++ assert(copy_b <= length); ++ raw += MLX5_WQE_DWORD_SIZE; ++ if (copy_b && ++ ((end - (uintptr_t)raw) > copy_b)) { ++ uint16_t n = (MLX5_WQE_DS(copy_b) - ++ 1 + 3) / 4; ++ ++ if (unlikely(max_wqe < n)) ++ break; ++ max_wqe -= n; ++ rte_memcpy((void *)raw, ++ (void *)addr, copy_b); ++ addr += copy_b; ++ length -= copy_b; ++ pkt_inline_sz += copy_b; ++ /* ++ * Another DWORD will be added ++ * in the inline part. ++ */ ++ raw += MLX5_WQE_DS(copy_b) * ++ MLX5_WQE_DWORD_SIZE - ++ MLX5_WQE_DWORD_SIZE; ++ } else { ++ /* NOP WQE. */ ++ wqe->ctrl = (rte_v128u32_t){ ++ htonl(txq->wqe_ci << 8), ++ htonl(txq->qp_num_8s | 1), ++ 0, ++ 0, ++ }; ++ ds = 1; ++ total_length = 0; ++ pkts--; ++ pkts_n++; ++ elts_head = (elts_head - 1) & ++ (elts_n - 1); ++ k++; ++ goto next_wqe; ++ } ++ } ++ } + /* Inline if enough room. */ +- if (txq->max_inline) { ++ if (txq->inline_en || tso) { + uintptr_t end = (uintptr_t) + (((uintptr_t)txq->wqes) + + (1 << txq->wqe_n) * MLX5_WQE_SIZE); + unsigned int max_inline = txq->max_inline * + RTE_CACHE_LINE_SIZE - +- MLX5_WQE_DWORD_SIZE; ++ (pkt_inline_sz - 2); + uintptr_t addr_end = (addr + max_inline) & + ~(RTE_CACHE_LINE_SIZE - 1); + unsigned int copy_b = (addr_end > addr) ? 
+@@ -491,6 +554,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + if (unlikely(max_wqe < n)) + break; + max_wqe -= n; ++ if (tso) { ++ uint32_t inl = ++ htonl(copy_b | MLX5_INLINE_SEG); ++ ++ pkt_inline_sz = ++ MLX5_WQE_DS(tso_header_sz) * ++ MLX5_WQE_DWORD_SIZE; ++ rte_memcpy((void *)raw, ++ (void *)&inl, sizeof(inl)); ++ raw += sizeof(inl); ++ pkt_inline_sz += sizeof(inl); ++ } + rte_memcpy((void *)raw, (void *)addr, copy_b); + addr += copy_b; + length -= copy_b; +@@ -591,18 +666,34 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + next_pkt: + ++i; + /* Initialize known and common part of the WQE structure. */ +- wqe->ctrl = (rte_v128u32_t){ +- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), +- htonl(txq->qp_num_8s | ds), +- 0, +- 0, +- }; +- wqe->eseg = (rte_v128u32_t){ +- 0, +- cs_flags, +- 0, +- (ehdr << 16) | htons(pkt_inline_sz), +- }; ++ if (tso) { ++ wqe->ctrl = (rte_v128u32_t){ ++ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), ++ htonl(txq->qp_num_8s | ds), ++ 0, ++ 0, ++ }; ++ wqe->eseg = (rte_v128u32_t){ ++ 0, ++ cs_flags | (htons(buf->tso_segsz) << 16), ++ 0, ++ (ehdr << 16) | htons(tso_header_sz), ++ }; ++ } else { ++ wqe->ctrl = (rte_v128u32_t){ ++ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), ++ htonl(txq->qp_num_8s | ds), ++ 0, ++ 0, ++ }; ++ wqe->eseg = (rte_v128u32_t){ ++ 0, ++ cs_flags, ++ 0, ++ (ehdr << 16) | htons(pkt_inline_sz), ++ }; ++ } ++next_wqe: + txq->wqe_ci += (ds + 3) / 4; + #ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ +@@ -610,10 +701,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + #endif + } while (pkts_n); + /* Take a shortcut if nothing must be sent. */ +- if (unlikely(i == 0)) ++ if (unlikely((i + k) == 0)) + return 0; + /* Check whether completion threshold has been reached. */ +- comp = txq->elts_comp + i + j; ++ comp = txq->elts_comp + i + j + k; + if (comp >= MLX5_TX_COMP_THRESH) { + volatile struct mlx5_wqe_ctrl *w = + (volatile struct mlx5_wqe_ctrl *)wqe; +diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h +index 41a34d7..6b328cf 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.h ++++ b/drivers/net/mlx5/mlx5_rxtx.h +@@ -254,6 +254,8 @@ struct txq { + uint16_t cqe_n:4; /* Number of CQ elements (in log2). */ + uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */ + uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */ ++ uint16_t inline_en:1; /* When set inline is enabled. */ ++ uint16_t tso_en:1; /* When set hardware TSO is enabled. */ + uint32_t qp_num_8s; /* QP number shifted by 8. */ + volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ + volatile void *wqes; /* Work queue (use volatile to write into). 
*/ +diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c +index 949035b..995b763 100644 +--- a/drivers/net/mlx5/mlx5_txq.c ++++ b/drivers/net/mlx5/mlx5_txq.c +@@ -342,6 +342,19 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + RTE_CACHE_LINE_SIZE); + attr.init.cap.max_inline_data = + tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; ++ tmpl.txq.inline_en = 1; ++ } ++ if (priv->tso) { ++ uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER + ++ (RTE_CACHE_LINE_SIZE - 1)) / ++ RTE_CACHE_LINE_SIZE); ++ ++ attr.init.max_tso_header = ++ max_tso_inline * RTE_CACHE_LINE_SIZE; ++ attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER; ++ tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline, ++ max_tso_inline); ++ tmpl.txq.tso_en = 1; + } + tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); + if (tmpl.qp == NULL) { +-- +2.7.4 + diff --git a/dpdk/dpdk-17.02_patches/0007-add-hardware-checksum-offload-for-tunnel-pa.patch b/dpdk/dpdk-17.02_patches/0007-add-hardware-checksum-offload-for-tunnel-pa.patch new file mode 100644 index 00000000..bbcce486 --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0007-add-hardware-checksum-offload-for-tunnel-pa.patch @@ -0,0 +1,194 @@ +From f0dda2ab16635894b1e3836d0b960b9270a3b491 Mon Sep 17 00:00:00 2001 +From: Shahaf Shuler +Date: Thu, 2 Mar 2017 11:05:44 +0200 +Subject: [PATCH] net/mlx5: add hardware checksum offload for tunnel packets + +Prior to this commit Tx checksum offload was supported only for the +inner headers. +This commit adds support for the hardware to compute the checksum for the +outer headers as well. + +The support is for tunneling protocols GRE and VXLAN. + +Signed-off-by: Shahaf Shuler +Acked-by: Nelio Laranjeiro +--- + doc/guides/nics/features/mlx5.ini | 2 ++ + doc/guides/nics/mlx5.rst | 3 ++- + drivers/net/mlx5/mlx5.c | 7 +++++++ + drivers/net/mlx5/mlx5.h | 2 ++ + drivers/net/mlx5/mlx5_ethdev.c | 2 ++ + drivers/net/mlx5/mlx5_prm.h | 6 ++++++ + drivers/net/mlx5/mlx5_rxtx.c | 14 +++++++++++++- + drivers/net/mlx5/mlx5_rxtx.h | 2 ++ + drivers/net/mlx5/mlx5_txq.c | 2 ++ + 9 files changed, 38 insertions(+), 2 deletions(-) + +diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini +index 8df25ce..1814f82 100644 +--- a/doc/guides/nics/features/mlx5.ini ++++ b/doc/guides/nics/features/mlx5.ini +@@ -27,6 +27,8 @@ CRC offload = Y + VLAN offload = Y + L3 checksum offload = Y + L4 checksum offload = Y ++Inner L3 checksum = Y ++Inner L4 checksum = Y + Packet type parsing = Y + Basic stats = Y + Stats per queue = Y +diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst +index 9b0ba29..41f3a47 100644 +--- a/doc/guides/nics/mlx5.rst ++++ b/doc/guides/nics/mlx5.rst +@@ -91,13 +91,14 @@ Features + - KVM and VMware ESX SR-IOV modes are supported. + - RSS hash result is supported. + - Hardware TSO. ++- Hardware checksum TX offload for VXLAN and GRE. + + Limitations + ----------- + + - Inner RSS for VXLAN frames is not supported yet. + - Port statistics through software counters only. +-- Hardware checksum offloads for VXLAN inner header are not supported yet. ++- Hardware checksum RX offloads for VXLAN inner header are not supported yet. + - Secondary process RX is not supported. 
+ + Configuration +diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c +index 03ed3b3..6f42948 100644 +--- a/drivers/net/mlx5/mlx5.c ++++ b/drivers/net/mlx5/mlx5.c +@@ -375,6 +375,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + struct ibv_device_attr device_attr; + unsigned int sriov; + unsigned int mps; ++ unsigned int tunnel_en; + int idx; + int i; + +@@ -429,12 +430,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + * as all ConnectX-5 devices. + */ + switch (pci_dev->id.device_id) { ++ case PCI_DEVICE_ID_MELLANOX_CONNECTX4: ++ tunnel_en = 1; ++ mps = 0; ++ break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + mps = 1; ++ tunnel_en = 1; + break; + default: + mps = 0; +@@ -539,6 +545,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + priv->mtu = ETHER_MTU; + priv->mps = mps; /* Enable MPW by default if supported. */ + priv->cqe_comp = 1; /* Enable compression by default. */ ++ priv->tunnel_en = tunnel_en; + err = mlx5_args(priv, pci_dev->device.devargs); + if (err) { + ERROR("failed to process device arguments: %s", +diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h +index 93f129b..870e01f 100644 +--- a/drivers/net/mlx5/mlx5.h ++++ b/drivers/net/mlx5/mlx5.h +@@ -127,6 +127,8 @@ struct priv { + unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */ + unsigned int pending_alarm:1; /* An alarm is pending. */ + unsigned int tso:1; /* Whether TSO is supported. */ ++ unsigned int tunnel_en:1; ++ /* Whether Tx offloads for tunneled packets are supported. */ + unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */ + unsigned int txq_inline; /* Maximum packet size for inlining. */ + unsigned int txqs_inline; /* Queue number threshold for inlining. */ +diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c +index 5542193..8be9e77 100644 +--- a/drivers/net/mlx5/mlx5_ethdev.c ++++ b/drivers/net/mlx5/mlx5_ethdev.c +@@ -695,6 +695,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + DEV_TX_OFFLOAD_TCP_CKSUM); + if (priv->tso) + info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; ++ if (priv->tunnel_en) ++ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (priv_get_ifname(priv, &ifname) == 0) + info->if_index = if_nametoindex(ifname); + /* FIXME: RETA update/query API expects the callee to know the size of +diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h +index 3318668..0a77f5b 100644 +--- a/drivers/net/mlx5/mlx5_prm.h ++++ b/drivers/net/mlx5/mlx5_prm.h +@@ -120,6 +120,12 @@ + /* Tunnel packet bit in the CQE. */ + #define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0) + ++/* Inner L3 checksum offload (Tunneled packets only). */ ++#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4) ++ ++/* Inner L4 checksum offload (Tunneled packets only). */ ++#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5) ++ + /* INVALID is used by packets matching no flow rules. 
*/ + #define MLX5_FLOW_MARK_INVALID 0 + +diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c +index 98889f6..c2eb891 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.c ++++ b/drivers/net/mlx5/mlx5_rxtx.c +@@ -443,7 +443,19 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { +- cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; ++ const uint64_t is_tunneled = buf->ol_flags & ++ (PKT_TX_TUNNEL_GRE | ++ PKT_TX_TUNNEL_VXLAN); ++ ++ if (is_tunneled && txq->tunnel_en) { ++ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | ++ MLX5_ETH_WQE_L4_INNER_CSUM; ++ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) ++ cs_flags |= MLX5_ETH_WQE_L3_CSUM; ++ } else { ++ cs_flags = MLX5_ETH_WQE_L3_CSUM | ++ MLX5_ETH_WQE_L4_CSUM; ++ } + } + raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; + /* Replace the Ethernet type by the VLAN if necessary. */ +diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h +index 6b328cf..9669564 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.h ++++ b/drivers/net/mlx5/mlx5_rxtx.h +@@ -256,6 +256,8 @@ struct txq { + uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */ + uint16_t inline_en:1; /* When set inline is enabled. */ + uint16_t tso_en:1; /* When set hardware TSO is enabled. */ ++ uint16_t tunnel_en:1; ++ /* When set TX offload for tunneled packets are supported. */ + uint32_t qp_num_8s; /* QP number shifted by 8. */ + volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ + volatile void *wqes; /* Work queue (use volatile to write into). */ +diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c +index 995b763..9d0c00f 100644 +--- a/drivers/net/mlx5/mlx5_txq.c ++++ b/drivers/net/mlx5/mlx5_txq.c +@@ -356,6 +356,8 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + max_tso_inline); + tmpl.txq.tso_en = 1; + } ++ if (priv->tunnel_en) ++ tmpl.txq.tunnel_en = 1; + tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); + if (tmpl.qp == NULL) { + ret = (errno ? errno : EINVAL); +-- +2.7.4 + diff --git a/dpdk/dpdk-17.02_patches/0008-net-mlx5-add-enhanced-multi-packet-send-for-ConnectX.patch b/dpdk/dpdk-17.02_patches/0008-net-mlx5-add-enhanced-multi-packet-send-for-ConnectX.patch new file mode 100644 index 00000000..6ff076c7 --- /dev/null +++ b/dpdk/dpdk-17.02_patches/0008-net-mlx5-add-enhanced-multi-packet-send-for-ConnectX.patch @@ -0,0 +1,809 @@ +From 7ca5c8de65acabe4cb60960adcfa9247efdd2a5c Mon Sep 17 00:00:00 2001 +From: Yongseok Koh +Date: Wed, 15 Mar 2017 16:55:44 -0700 +Subject: [PATCH] net/mlx5: add enhanced multi-packet send for ConnectX-5 + +ConnectX-5 supports enhanced version of multi-packet send (MPS). An MPS Tx +descriptor can carry multiple packets either by including pointers of +packets or by inlining packets. Inlining packet data can be helpful to +better utilize PCIe bandwidth. In addition, Enhanced MPS supports hybrid +mode - mixing inlined packets and pointers in a descriptor. This feature is +enabled by default if supported by HW. 
+ +Signed-off-by: Yongseok Koh +--- + doc/guides/nics/mlx5.rst | 31 +++- + drivers/net/mlx5/mlx5.c | 37 +++- + drivers/net/mlx5/mlx5.h | 4 +- + drivers/net/mlx5/mlx5_defs.h | 7 + + drivers/net/mlx5/mlx5_ethdev.c | 6 +- + drivers/net/mlx5/mlx5_prm.h | 20 ++ + drivers/net/mlx5/mlx5_rxtx.c | 410 +++++++++++++++++++++++++++++++++++++++++ + drivers/net/mlx5/mlx5_rxtx.h | 7 +- + drivers/net/mlx5/mlx5_txq.c | 29 ++- + 9 files changed, 534 insertions(+), 17 deletions(-) + +diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst +index 41f3a47..0783aeb 100644 +--- a/doc/guides/nics/mlx5.rst ++++ b/doc/guides/nics/mlx5.rst +@@ -183,10 +183,17 @@ Run-time configuration + + - ``txq_mpw_en`` parameter [int] + +- A nonzero value enables multi-packet send. This feature allows the TX +- burst function to pack up to five packets in two descriptors in order to +- save PCI bandwidth and improve performance at the cost of a slightly +- higher CPU usage. ++ A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and ++ enhanced multi-packet send (Enhanced MPS) for ConnectX-5. MPS allows the ++ TX burst function to pack up multiple packets in a single descriptor ++ session in order to save PCI bandwidth and improve performance at the ++ cost of a slightly higher CPU usage. When ``txq_inline`` is set along ++ with ``txq_mpw_en``, TX burst function tries to copy entire packet data ++ on to TX descriptor instead of including pointer of packet only if there ++ is enough room remained in the descriptor. ``txq_inline`` sets ++ per-descriptor space for either pointers or inlined packets. In addition, ++ Enhanced MPS supports hybrid mode - mixing inlined packets and pointers ++ in the same descriptor. + + This option cannot be used in conjunction with ``tso`` below. When ``tso`` + is set, ``txq_mpw_en`` is disabled. +@@ -194,6 +201,22 @@ Run-time configuration + It is currently only supported on the ConnectX-4 Lx and ConnectX-5 + families of adapters. Enabled by default. + ++- ``txq_mpw_hdr_dseg_en`` parameter [int] ++ ++ A nonzero value enables including two pointers in the first block of TX ++ descriptor. This can be used to lessen CPU load for memory copy. ++ ++ Effective only when Enhanced MPS is supported. Disabled by default. ++ ++- ``txq_max_inline_len`` parameter [int] ++ ++ Maximum size of packet to be inlined. This limits the size of packet to ++ be inlined. If the size of a packet is larger than configured value, the ++ packet isn't inlined even though there's enough space remained in the ++ descriptor. Instead, the packet is included with pointer. ++ ++ Effective only when Enhanced MPS is supported. The default value is 256. ++ + - ``tso`` parameter [int] + + A nonzero value enables hardware TSO. +diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c +index ebc7984..bc6a34f 100644 +--- a/drivers/net/mlx5/mlx5.c ++++ b/drivers/net/mlx5/mlx5.c +@@ -84,6 +84,12 @@ + /* Device parameter to enable multi-packet send WQEs. */ + #define MLX5_TXQ_MPW_EN "txq_mpw_en" + ++/* Device parameter to include 2 dsegs in the title WQEBB. */ ++#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" ++ ++/* Device parameter to limit the size of inlining packet. */ ++#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" ++ + /* Device parameter to enable hardware TSO offload. 
*/ + #define MLX5_TSO "tso" + +@@ -294,7 +300,11 @@ mlx5_args_check(const char *key, const char *val, void *opaque) + } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { + priv->txqs_inline = tmp; + } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { +- priv->mps &= !!tmp; /* Enable MPW only if HW supports */ ++ priv->mps = !!tmp ? priv->mps : MLX5_MPW_DISABLED; ++ } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { ++ priv->mpw_hdr_dseg = !!tmp; ++ } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { ++ priv->inline_max_packet_sz = tmp; + } else if (strcmp(MLX5_TSO, key) == 0) { + priv->tso = !!tmp; + } else { +@@ -323,6 +333,8 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs) + MLX5_TXQ_INLINE, + MLX5_TXQS_MIN_INLINE, + MLX5_TXQ_MPW_EN, ++ MLX5_TXQ_MPW_HDR_DSEG_EN, ++ MLX5_TXQ_MAX_INLINE_LEN, + MLX5_TSO, + NULL, + }; +@@ -434,24 +446,27 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4: + tunnel_en = 1; +- mps = 0; ++ mps = MLX5_MPW_DISABLED; + break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: ++ mps = MLX5_MPW; ++ break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX5: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: +- mps = 1; + tunnel_en = 1; ++ mps = MLX5_MPW_ENHANCED; + break; + default: +- mps = 0; ++ mps = MLX5_MPW_DISABLED; + } + INFO("PCI information matches, using device \"%s\"" +- " (SR-IOV: %s, MPS: %s)", ++ " (SR-IOV: %s, %sMPS: %s)", + list[i]->name, + sriov ? "true" : "false", +- mps ? "true" : "false"); ++ mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", ++ mps != MLX5_MPW_DISABLED ? "true" : "false"); + attr_ctx = ibv_open_device(list[i]); + err = errno; + break; +@@ -546,6 +561,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + priv->pd = pd; + priv->mtu = ETHER_MTU; + priv->mps = mps; /* Enable MPW by default if supported. */ ++ /* Set default values for Enhanced MPW, a.k.a MPWv2. */ ++ if (mps == MLX5_MPW_ENHANCED) { ++ priv->mpw_hdr_dseg = 0; ++ priv->txqs_inline = MLX5_EMPW_MIN_TXQS; ++ priv->inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN; ++ priv->txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE; ++ } + priv->cqe_comp = 1; /* Enable compression by default. */ + priv->tunnel_en = tunnel_en; + err = mlx5_args(priv, pci_dev->device.devargs); +@@ -613,6 +635,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + "with TSO. MPS disabled"); + priv->mps = 0; + } ++ INFO("%sMPS is %s", ++ priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", ++ priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); + /* Allocate and register default RSS hash keys. */ + priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, + sizeof((*priv->rss_conf)[0]), 0); +diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h +index 870e01f..d26d465 100644 +--- a/drivers/net/mlx5/mlx5.h ++++ b/drivers/net/mlx5/mlx5.h +@@ -123,7 +123,8 @@ struct priv { + unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ + unsigned int hw_padding:1; /* End alignment padding is supported. */ + unsigned int sriov:1; /* This is a VF or PF with VF devices. */ +- unsigned int mps:1; /* Whether multi-packet send is supported. */ ++ unsigned int mps:2; /* Multi-packet send mode (0: disabled). */ ++ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ + unsigned int cqe_comp:1; /* Whether CQE compression is enabled. 
*/ + unsigned int pending_alarm:1; /* An alarm is pending. */ + unsigned int tso:1; /* Whether TSO is supported. */ +@@ -132,6 +133,7 @@ struct priv { + unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */ + unsigned int txq_inline; /* Maximum packet size for inlining. */ + unsigned int txqs_inline; /* Queue number threshold for inlining. */ ++ unsigned int inline_max_packet_sz; /* Max packet size for inlining. */ + /* RX/TX queues. */ + unsigned int rxqs_n; /* RX queues array size. */ + unsigned int txqs_n; /* TX queues array size. */ +diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h +index eecb908..201bb33 100644 +--- a/drivers/net/mlx5/mlx5_defs.h ++++ b/drivers/net/mlx5/mlx5_defs.h +@@ -55,6 +55,13 @@ + #define MLX5_TX_COMP_THRESH 32 + + /* ++ * Request TX completion every time the total number of WQEBBs used for inlining ++ * packets exceeds the size of WQ divided by this divisor. Better to be power of ++ * two for performance. ++ */ ++#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3) ++ ++/* + * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP + * from which buffers are to be transmitted will have to be mapped by this + * driver to their own Memory Region (MR). This is a slow operation. +diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c +index 5deb6e8..dd5fe5c 100644 +--- a/drivers/net/mlx5/mlx5_ethdev.c ++++ b/drivers/net/mlx5/mlx5_ethdev.c +@@ -1590,7 +1590,11 @@ priv_select_tx_function(struct priv *priv) + { + priv->dev->tx_pkt_burst = mlx5_tx_burst; + /* Select appropriate TX function. */ +- if (priv->mps && priv->txq_inline) { ++ if (priv->mps == MLX5_MPW_ENHANCED) { ++ priv->dev->tx_pkt_burst = ++ mlx5_tx_burst_empw; ++ DEBUG("selected Enhanced MPW TX function"); ++ } else if (priv->mps && priv->txq_inline) { + priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline; + DEBUG("selected MPW inline TX function"); + } else if (priv->mps) { +diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h +index 0a77f5b..155bdba 100644 +--- a/drivers/net/mlx5/mlx5_prm.h ++++ b/drivers/net/mlx5/mlx5_prm.h +@@ -73,6 +73,9 @@ + /* WQE size */ + #define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE) + ++/* Max size of a WQE session. */ ++#define MLX5_WQE_SIZE_MAX 960U ++ + /* Compute the number of DS. */ + #define MLX5_WQE_DS(n) \ + (((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE) +@@ -80,10 +83,19 @@ + /* Room for inline data in multi-packet WQE. */ + #define MLX5_MWQE64_INL_DATA 28 + ++/* Default minimum number of Tx queues for inlining packets. */ ++#define MLX5_EMPW_MIN_TXQS 8 ++ ++/* Default max packet length to be inlined. */ ++#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) ++ + #ifndef HAVE_VERBS_MLX5_OPCODE_TSO + #define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ + #endif + ++#define MLX5_OPC_MOD_ENHANCED_MPSW 0 ++#define MLX5_OPCODE_ENHANCED_MPSW 0x29 ++ + /* CQE value to inform that VLAN is stripped. */ + #define MLX5_CQE_VLAN_STRIPPED (1u << 0) + +@@ -176,10 +188,18 @@ struct mlx5_wqe64 { + uint8_t raw[32]; + } __rte_aligned(MLX5_WQE_SIZE); + ++/* MPW mode. */ ++enum mlx5_mpw_mode { ++ MLX5_MPW_DISABLED, ++ MLX5_MPW, ++ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */ ++}; ++ + /* MPW session status. 
*/ + enum mlx5_mpw_state { + MLX5_MPW_STATE_OPENED, + MLX5_MPW_INL_STATE_OPENED, ++ MLX5_MPW_ENHANCED_STATE_OPENED, + MLX5_MPW_STATE_CLOSED, + }; + +diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c +index 9fc433e..a1dd84a 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.c ++++ b/drivers/net/mlx5/mlx5_rxtx.c +@@ -195,6 +195,62 @@ tx_mlx5_wqe(struct txq *txq, uint16_t ci) + } + + /** ++ * Return the size of tailroom of WQ. ++ * ++ * @param txq ++ * Pointer to TX queue structure. ++ * @param addr ++ * Pointer to tail of WQ. ++ * ++ * @return ++ * Size of tailroom. ++ */ ++static inline size_t ++tx_mlx5_wq_tailroom(struct txq *txq, void *addr) ++{ ++ size_t tailroom; ++ tailroom = (uintptr_t)(txq->wqes) + ++ (1 << txq->wqe_n) * MLX5_WQE_SIZE - ++ (uintptr_t)addr; ++ return tailroom; ++} ++ ++/** ++ * Copy data to tailroom of circular queue. ++ * ++ * @param dst ++ * Pointer to destination. ++ * @param src ++ * Pointer to source. ++ * @param n ++ * Number of bytes to copy. ++ * @param base ++ * Pointer to head of queue. ++ * @param tailroom ++ * Size of tailroom from dst. ++ * ++ * @return ++ * Pointer after copied data. ++ */ ++static inline void * ++mlx5_copy_to_wq(void *dst, const void *src, size_t n, ++ void *base, size_t tailroom) ++{ ++ void *ret; ++ ++ if (n > tailroom) { ++ rte_memcpy(dst, src, tailroom); ++ rte_memcpy(base, (void *)((uintptr_t)src + tailroom), ++ n - tailroom); ++ ret = (uint8_t *)base + n - tailroom; ++ } else { ++ rte_memcpy(dst, src, n); ++ ret = (n == tailroom) ? base : (uint8_t *)dst + n; ++ } ++ return ret; ++} ++ ++/** + * Manage TX completions. + * + * When sending a burst, mlx5_tx_burst() posts several WRs. +@@ -1269,6 +1325,360 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, + } + + /** ++ * Open an Enhanced MPW session. ++ * ++ * @param txq ++ * Pointer to TX queue structure. ++ * @param mpw ++ * Pointer to MPW session structure. ++ * @param length ++ * Packet length. ++ */ ++static inline void ++mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding) ++{ ++ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1); ++ ++ mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED; ++ mpw->pkts_n = 0; ++ mpw->total_len = sizeof(struct mlx5_wqe); ++ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); ++ mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | ++ (txq->wqe_ci << 8) | ++ MLX5_OPCODE_ENHANCED_MPSW); ++ mpw->wqe->ctrl[2] = 0; ++ mpw->wqe->ctrl[3] = 0; ++ memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); ++ if (unlikely(padding)) { ++ uintptr_t addr = (uintptr_t)(mpw->wqe + 1); ++ ++ /* Pad the first 2 DWORDs with zero-length inline header. */ ++ *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); ++ *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = ++ htonl(MLX5_INLINE_SEG); ++ mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; ++ /* Start from the next WQEBB. */ ++ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); ++ } else { ++ mpw->data.raw = (volatile void *)(mpw->wqe + 1); ++ } ++} ++ ++/** ++ * Close an Enhanced MPW session. ++ * ++ * @param txq ++ * Pointer to TX queue structure. ++ * @param mpw ++ * Pointer to MPW session structure. ++ * ++ * @return ++ * Number of consumed WQEs. ++ */ ++static inline uint16_t ++mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw) ++{ ++ uint16_t ret; ++ ++ /* Store size in multiple of 16 bytes. Control and Ethernet segments ++ * count as 2. 
++ */ ++ mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); ++ mpw->state = MLX5_MPW_STATE_CLOSED; ++ ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; ++ txq->wqe_ci += ret; ++ return ret; ++} ++ ++/** ++ * DPDK callback for TX with Enhanced MPW support. ++ * ++ * @param dpdk_txq ++ * Generic pointer to TX queue structure. ++ * @param[in] pkts ++ * Packets to transmit. ++ * @param pkts_n ++ * Number of packets in array. ++ * ++ * @return ++ * Number of packets successfully transmitted (<= pkts_n). ++ */ ++uint16_t ++mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) ++{ ++ struct txq *txq = (struct txq *)dpdk_txq; ++ uint16_t elts_head = txq->elts_head; ++ const unsigned int elts_n = 1 << txq->elts_n; ++ unsigned int i = 0; ++ unsigned int j = 0; ++ unsigned int max_elts; ++ uint16_t max_wqe; ++ unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE; ++ unsigned int mpw_room = 0; ++ unsigned int inl_pad = 0; ++ uint32_t inl_hdr; ++ struct mlx5_mpw mpw = { ++ .state = MLX5_MPW_STATE_CLOSED, ++ }; ++ ++ if (unlikely(!pkts_n)) ++ return 0; ++ /* Start processing. */ ++ txq_complete(txq); ++ max_elts = (elts_n - (elts_head - txq->elts_tail)); ++ if (max_elts > elts_n) ++ max_elts -= elts_n; ++ /* A CQE slot must always be available. */ ++ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); ++ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); ++ if (unlikely(!max_wqe)) ++ return 0; ++ do { ++ struct rte_mbuf *buf = *(pkts++); ++ unsigned int elts_head_next; ++ uintptr_t addr; ++ uint64_t naddr; ++ unsigned int n; ++ unsigned int do_inline = 0; /* Whether inline is possible. */ ++ uint32_t length; ++ unsigned int segs_n = buf->nb_segs; ++ uint32_t cs_flags = 0; ++ ++ /* ++ * Make sure there is enough room to store this packet and ++ * that one ring entry remains unused. ++ */ ++ assert(segs_n); ++ if (max_elts - j < segs_n + 1) ++ break; ++ /* Do not bother with large packets MPW cannot handle. */ ++ if (segs_n > MLX5_MPW_DSEG_MAX) ++ break; ++ /* Should we enable HW CKSUM offload. */ ++ if (buf->ol_flags & ++ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) ++ cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; ++ /* Retrieve packet information. */ ++ length = PKT_LEN(buf); ++ /* Start new session if: ++ * - multi-segment packet ++ * - no space left even for a dseg ++ * - next packet can be inlined with a new WQE ++ * - cs_flag differs ++ * It can't be MLX5_MPW_STATE_OPENED as always have a single ++ * segmented packet. ++ */ ++ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) { ++ if ((segs_n != 1) || ++ (inl_pad + sizeof(struct mlx5_wqe_data_seg) > ++ mpw_room) || ++ (length <= txq->inline_max_packet_sz && ++ inl_pad + sizeof(inl_hdr) + length > ++ mpw_room) || ++ (mpw.wqe->eseg.cs_flags != cs_flags)) ++ max_wqe -= mlx5_empw_close(txq, &mpw); ++ } ++ if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) { ++ if (unlikely(segs_n != 1)) { ++ /* Fall back to legacy MPW. ++ * A MPW session consumes 2 WQEs at most to ++ * include MLX5_MPW_DSEG_MAX pointers. ++ */ ++ if (unlikely(max_wqe < 2)) ++ break; ++ mlx5_mpw_new(txq, &mpw, length); ++ } else { ++ /* In Enhanced MPW, inline as much as the budget ++ * is allowed. The remaining space is to be ++ * filled with dsegs. If the title WQEBB isn't ++ * padded, it will have 2 dsegs there. ++ */ ++ mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX, ++ (max_inline ? 
max_inline : ++ pkts_n * MLX5_WQE_DWORD_SIZE) + ++ MLX5_WQE_SIZE); ++ if (unlikely(max_wqe * MLX5_WQE_SIZE < ++ mpw_room)) ++ break; ++ /* Don't pad the title WQEBB to not waste WQ. */ ++ mlx5_empw_new(txq, &mpw, 0); ++ mpw_room -= mpw.total_len; ++ inl_pad = 0; ++ do_inline = ++ length <= txq->inline_max_packet_sz && ++ sizeof(inl_hdr) + length <= mpw_room && ++ !txq->mpw_hdr_dseg; ++ } ++ mpw.wqe->eseg.cs_flags = cs_flags; ++ } else { ++ /* Evaluate whether the next packet can be inlined. ++ * Inlininig is possible when: ++ * - length is less than configured value ++ * - length fits for remaining space ++ * - not required to fill the title WQEBB with dsegs ++ */ ++ do_inline = ++ length <= txq->inline_max_packet_sz && ++ inl_pad + sizeof(inl_hdr) + length <= ++ mpw_room && ++ (!txq->mpw_hdr_dseg || ++ mpw.total_len >= MLX5_WQE_SIZE); ++ } ++ /* Multi-segment packets must be alone in their MPW. */ ++ assert((segs_n == 1) || (mpw.pkts_n == 0)); ++ if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) { ++#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) ++ length = 0; ++#endif ++ do { ++ volatile struct mlx5_wqe_data_seg *dseg; ++ ++ elts_head_next = ++ (elts_head + 1) & (elts_n - 1); ++ assert(buf); ++ (*txq->elts)[elts_head] = buf; ++ dseg = mpw.data.dseg[mpw.pkts_n]; ++ addr = rte_pktmbuf_mtod(buf, uintptr_t); ++ *dseg = (struct mlx5_wqe_data_seg){ ++ .byte_count = htonl(DATA_LEN(buf)), ++ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), ++ .addr = htonll(addr), ++ }; ++ elts_head = elts_head_next; ++#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) ++ length += DATA_LEN(buf); ++#endif ++ buf = buf->next; ++ ++j; ++ ++mpw.pkts_n; ++ } while (--segs_n); ++ /* A multi-segmented packet takes one MPW session. ++ * TODO: Pack more multi-segmented packets if possible. ++ */ ++ mlx5_mpw_close(txq, &mpw); ++ if (mpw.pkts_n < 3) ++ max_wqe--; ++ else ++ max_wqe -= 2; ++ } else if (do_inline) { ++ /* Inline packet into WQE. */ ++ unsigned int max; ++ ++ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); ++ assert(length == DATA_LEN(buf)); ++ inl_hdr = htonl(length | MLX5_INLINE_SEG); ++ addr = rte_pktmbuf_mtod(buf, uintptr_t); ++ mpw.data.raw = (volatile void *) ++ ((uintptr_t)mpw.data.raw + inl_pad); ++ max = tx_mlx5_wq_tailroom(txq, ++ (void *)(uintptr_t)mpw.data.raw); ++ /* Copy inline header. */ ++ mpw.data.raw = (volatile void *) ++ mlx5_copy_to_wq( ++ (void *)(uintptr_t)mpw.data.raw, ++ &inl_hdr, ++ sizeof(inl_hdr), ++ (void *)(uintptr_t)txq->wqes, ++ max); ++ max = tx_mlx5_wq_tailroom(txq, ++ (void *)(uintptr_t)mpw.data.raw); ++ /* Copy packet data. */ ++ mpw.data.raw = (volatile void *) ++ mlx5_copy_to_wq( ++ (void *)(uintptr_t)mpw.data.raw, ++ (void *)addr, ++ length, ++ (void *)(uintptr_t)txq->wqes, ++ max); ++ ++mpw.pkts_n; ++ mpw.total_len += (inl_pad + sizeof(inl_hdr) + length); ++ /* No need to get completion as the entire packet is ++ * copied to WQ. Free the buf right away. ++ */ ++ elts_head_next = elts_head; ++ rte_pktmbuf_free_seg(buf); ++ mpw_room -= (inl_pad + sizeof(inl_hdr) + length); ++ /* Add pad in the next packet if any. */ ++ inl_pad = (((uintptr_t)mpw.data.raw + ++ (MLX5_WQE_DWORD_SIZE - 1)) & ++ ~(MLX5_WQE_DWORD_SIZE - 1)) - ++ (uintptr_t)mpw.data.raw; ++ } else { ++ /* No inline. Load a dseg of packet pointer. 
*/ ++ volatile rte_v128u32_t *dseg; ++ ++ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); ++ assert((inl_pad + sizeof(*dseg)) <= mpw_room); ++ assert(length == DATA_LEN(buf)); ++ if (!tx_mlx5_wq_tailroom(txq, ++ (void *)((uintptr_t)mpw.data.raw ++ + inl_pad))) ++ dseg = (volatile void *)txq->wqes; ++ else ++ dseg = (volatile void *) ++ ((uintptr_t)mpw.data.raw + ++ inl_pad); ++ elts_head_next = (elts_head + 1) & (elts_n - 1); ++ (*txq->elts)[elts_head] = buf; ++ addr = rte_pktmbuf_mtod(buf, uintptr_t); ++ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) ++ rte_prefetch2((void *)(addr + ++ n * RTE_CACHE_LINE_SIZE)); ++ naddr = htonll(addr); ++ *dseg = (rte_v128u32_t) { ++ htonl(length), ++ txq_mp2mr(txq, txq_mb2mp(buf)), ++ naddr, ++ naddr >> 32, ++ }; ++ mpw.data.raw = (volatile void *)(dseg + 1); ++ mpw.total_len += (inl_pad + sizeof(*dseg)); ++ ++j; ++ ++mpw.pkts_n; ++ mpw_room -= (inl_pad + sizeof(*dseg)); ++ inl_pad = 0; ++ } ++ elts_head = elts_head_next; ++#ifdef MLX5_PMD_SOFT_COUNTERS ++ /* Increment sent bytes counter. */ ++ txq->stats.obytes += length; ++#endif ++ ++i; ++ } while (i < pkts_n); ++ /* Take a shortcut if nothing must be sent. */ ++ if (unlikely(i == 0)) ++ return 0; ++ /* Check whether completion threshold has been reached. */ ++ if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH || ++ (uint16_t)(txq->wqe_ci - txq->mpw_comp) >= ++ (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) { ++ volatile struct mlx5_wqe *wqe = mpw.wqe; ++ ++ /* Request completion on last WQE. */ ++ wqe->ctrl[2] = htonl(8); ++ /* Save elts_head in unused "immediate" field of WQE. */ ++ wqe->ctrl[3] = elts_head; ++ txq->elts_comp = 0; ++ txq->mpw_comp = txq->wqe_ci; ++ txq->cq_pi++; ++ } else { ++ txq->elts_comp += j; ++ } ++#ifdef MLX5_PMD_SOFT_COUNTERS ++ /* Increment sent packets counter. */ ++ txq->stats.opackets += i; ++#endif ++ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) ++ mlx5_empw_close(txq, &mpw); ++ else if (mpw.state == MLX5_MPW_STATE_OPENED) ++ mlx5_mpw_close(txq, &mpw); ++ /* Ring QP doorbell. */ ++ mlx5_tx_dbrec(txq, mpw.wqe); ++ txq->elts_head = elts_head; ++ return i; ++} ++ ++/** + * Translate RX completion flags to packet type. + * + * @param[in] cqe +diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h +index 0db810c..4a4bd84 100644 +--- a/drivers/net/mlx5/mlx5_rxtx.h ++++ b/drivers/net/mlx5/mlx5_rxtx.h +@@ -248,17 +248,21 @@ struct txq { + uint16_t elts_head; /* Current index in (*elts)[]. */ + uint16_t elts_tail; /* First element awaiting completion. */ + uint16_t elts_comp; /* Counter since last completion request. */ ++ uint16_t mpw_comp; /* WQ index since last completion request. */ + uint16_t cq_ci; /* Consumer index for completion queue. */ ++ uint16_t cq_pi; /* Producer index for completion queue. */ + uint16_t wqe_ci; /* Consumer index for work queue. */ + uint16_t wqe_pi; /* Producer index for work queue. */ + uint16_t elts_n:4; /* (*elts)[] length (in log2). */ + uint16_t cqe_n:4; /* Number of CQ elements (in log2). */ + uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */ +- uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */ + uint16_t inline_en:1; /* When set inline is enabled. */ + uint16_t tso_en:1; /* When set hardware TSO is enabled. */ + uint16_t tunnel_en:1; + /* When set TX offload for tunneled packets are supported. */ ++ uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ ++ uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. 
*/ ++ uint16_t inline_max_packet_sz; /* Max packet size for inlining. */ + uint32_t qp_num_8s; /* QP number shifted by 8. */ + volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ + volatile void *wqes; /* Work queue (use volatile to write into). */ +@@ -329,6 +333,7 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); + uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); + uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); + uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); ++uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); + uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); + uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); + uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); +diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c +index 9d0c00f..bbfce75 100644 +--- a/drivers/net/mlx5/mlx5_txq.c ++++ b/drivers/net/mlx5/mlx5_txq.c +@@ -266,6 +266,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + struct ibv_exp_cq_attr cq_attr; + } attr; + enum ibv_exp_query_intf_status status; ++ unsigned int cqe_n; + int ret = 0; + + if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { +@@ -276,6 +277,8 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + (void)conf; /* Thresholds configuration (ignored). */ + assert(desc > MLX5_TX_COMP_THRESH); + tmpl.txq.elts_n = log2above(desc); ++ if (priv->mps == MLX5_MPW_ENHANCED) ++ tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg; + /* MRs will be registered in mp2mr[] later. */ + attr.rd = (struct ibv_exp_res_domain_init_attr){ + .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | +@@ -294,9 +297,12 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, + .res_domain = tmpl.rd, + }; ++ cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ? ++ ((desc / MLX5_TX_COMP_THRESH) - 1) : 1; ++ if (priv->mps == MLX5_MPW_ENHANCED) ++ cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; + tmpl.cq = ibv_exp_create_cq(priv->ctx, +- (((desc / MLX5_TX_COMP_THRESH) - 1) ? +- ((desc / MLX5_TX_COMP_THRESH) - 1) : 1), ++ cqe_n, + NULL, NULL, 0, &attr.cq); + if (tmpl.cq == NULL) { + ret = ENOMEM; +@@ -340,9 +346,24 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + tmpl.txq.max_inline = + ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE); +- attr.init.cap.max_inline_data = +- tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; + tmpl.txq.inline_en = 1; ++ /* TSO and MPS can't be enabled concurrently. */ ++ assert(!priv->tso || !priv->mps); ++ if (priv->mps == MLX5_MPW_ENHANCED) { ++ tmpl.txq.inline_max_packet_sz = ++ priv->inline_max_packet_sz; ++ /* To minimize the size of data set, avoid requesting ++ * too large WQ. 
++ */ ++ attr.init.cap.max_inline_data = ++ ((RTE_MIN(priv->txq_inline, ++ priv->inline_max_packet_sz) + ++ (RTE_CACHE_LINE_SIZE - 1)) / ++ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; ++ } else { ++ attr.init.cap.max_inline_data = ++ tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; ++ } + } + if (priv->tso) { + uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER + +-- +2.7.4 + diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 9dc3fcce..538db6cb 100755 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -790,7 +790,10 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_MLX5: { - char *pn_100g[] = { "MCX415A-CCAT", "MCX416A-CCAT", 0 }; + char *pn_100g[] = { "MCX415A-CCAT", "MCX416A-CCAT", + "MCX556A-ECAT", "MCX556A-EDAT", "MCX555A-ECAT", + "MCX515A-CCAT", "MCX516A-CCAT", "MCX516A-CDAT", 0 + }; char *pn_40g[] = { "MCX413A-BCAT", "MCX414A-BCAT", "MCX415A-BCAT", "MCX416A-BCAT", "MCX4131A-BCAT", 0 }; -- cgit 1.2.3-korg From 1664f9ba4a154f6d11870e0bb9fdaa527b5afbaf Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 24 Apr 2017 20:48:53 +0200 Subject: Add support for 32-bit x86 compilation in Makefiles Change-Id: Ida73678b47b685abef4e81b5cad9fc13eb330850 Signed-off-by: Damjan Marion --- build-data/platforms/vpp.mk | 7 +++++-- dpdk/Makefile | 8 +++++--- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index c61375d8..5aafdd76 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -12,11 +12,14 @@ # limitations under the License. # vector packet processor + +MACHINE=$(shell uname -m) + vpp_arch = native -ifeq ($(shell uname -m),x86_64) +ifeq ($(MACHINE),$(filter $(MACHINE),x86_64 i686)) vpp_march = corei7 # Nehalem Instruction set vpp_mtune = corei7-avx # Optimize for Sandy Bridge -else ifeq ($(shell uname -m),aarch64) +else ifeq ($(MACHINE),aarch64) ifeq ($(TARGET_PLATFORM),thunderx) vpp_march = armv8-a+crc vpp_mtune = thunderx diff --git a/dpdk/Makefile b/dpdk/Makefile index c46ef0f1..23e84ef4 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -50,11 +50,13 @@ else DPDK_CC=gcc endif +MACHINE=$(shell uname -m) + ############################################################################## -# Intel x86_64 +# Intel x86 ############################################################################## -ifeq ($(shell uname -m),x86_64) -DPDK_TARGET ?= x86_64-native-linuxapp-$(DPDK_CC) +ifeq ($(MACHINE),$(filter $(MACHINE),x86_64 i686)) +DPDK_TARGET ?= $(MACHINE)-native-linuxapp-$(DPDK_CC) DPDK_MACHINE ?= nhm DPDK_TUNE ?= core-avx2 -- cgit 1.2.3-korg From 6a5be214b95f3ec9f30eab46aa1a21a62a7bbc85 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Thu, 11 May 2017 14:55:43 +0200 Subject: dpdk: bump to dpdk 17.05 Change-Id: I19744387859129c6b8dc104041af158bf5f1d988 Signed-off-by: Damjan Marion --- dpdk/Makefile | 9 +++++---- src/plugins/dpdk/device/device.c | 4 ++++ src/plugins/dpdk/device/dpdk.h | 22 ---------------------- src/plugins/dpdk/device/init.c | 4 ++++ src/plugins/dpdk/hqos/hqos.c | 8 ++++++++ 5 files changed, 21 insertions(+), 26 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 23e84ef4..cd79e394 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -20,17 +20,18 @@ DPDK_PKTMBUF_HEADROOM ?= 128 DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_DEBUG ?= n DPDK_CRYPTO_SW_PMD ?= n +DPDK_MLX4_PMD ?= n DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 17.02 
-PKG_SUFFIX ?= vpp3 +DPDK_VERSION ?= 17.05 +PKG_SUFFIX ?= vpp1 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9 DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 +DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) ifeq ($(DPDK_CRYPTO_SW_PMD),y) @@ -139,6 +140,7 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_PMD_QAT,y) $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_SW_PMD)) $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_SW_PMD)) + $(call set,RTE_LIBRTE_MLX4_PMD,$(DPDK_MLX4_PMD)) $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed $(call set,RTE_LIBRTE_TIMER,n) @@ -147,7 +149,6 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_ACL,n) $(call set,RTE_LIBRTE_POWER,n) $(call set,RTE_LIBRTE_DISTRIBUTOR,n) - $(call set,RTE_LIBRTE_REORDER,n) $(call set,RTE_LIBRTE_PORT,n) $(call set,RTE_LIBRTE_TABLE,n) $(call set,RTE_LIBRTE_PIPELINE,n) diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c index 465a5874..51d6eacb 100644 --- a/src/plugins/dpdk/device/device.c +++ b/src/plugins/dpdk/device/device.c @@ -254,7 +254,11 @@ static_always_inline &tx_vector[tx_tail], tx_head - tx_tail); rv = rte_ring_sp_enqueue_burst (hqos->swq, (void **) &tx_vector[tx_tail], +#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) + (uint16_t) (tx_head - tx_tail), 0); +#else (uint16_t) (tx_head - tx_tail)); +#endif } else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD)) { diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h index 7e2901bb..7e974491 100644 --- a/src/plugins/dpdk/device/dpdk.h +++ b/src/plugins/dpdk/device/dpdk.h @@ -55,7 +55,6 @@ extern vnet_device_class_t dpdk_device_class; extern vlib_node_registration_t dpdk_input_node; -#if RTE_VERSION >= RTE_VERSION_NUM(17, 2, 0, 0) #define foreach_dpdk_pmd \ _ ("net_thunderx", THUNDERX) \ _ ("net_e1000_em", E1000EM) \ @@ -75,27 +74,6 @@ extern vlib_node_registration_t dpdk_input_node; _ ("net_mlx4", MLX4) \ _ ("net_mlx5", MLX5) \ _ ("net_dpaa2", DPAA2) -#else -#define foreach_dpdk_pmd \ - _ ("net_thunderx", THUNDERX) \ - _ ("net_e1000_em", E1000EM) \ - _ ("net_e1000_igb", IGB) \ - _ ("net_e1000_igb_vf", IGBVF) \ - _ ("net_ixgbe", IXGBE) \ - _ ("net_ixgbe_vf", IXGBEVF) \ - _ ("net_i40e", I40E) \ - _ ("net_i40e_vf", I40EVF) \ - _ ("net_virtio", VIRTIO) \ - _ ("net_enic", ENIC) \ - _ ("net_vmxnet3", VMXNET3) \ - _ ("AF_PACKET PMD", AF_PACKET) \ - _ ("rte_bond_pmd", BOND) \ - _ ("net_fm10k", FM10K) \ - _ ("net_cxgbe", CXGBE) \ - _ ("net_mlx4", MLX4) \ - _ ("net_mlx5", MLX5) \ - _ ("net_dpaa2", DPAA2) -#endif typedef enum { diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 69959c05..0ee28db5 100755 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -1174,7 +1174,11 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) log_level = (CLIB_DEBUG > 0) ? 
RTE_LOG_DEBUG : RTE_LOG_NOTICE; +#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) + rte_log_set_global_level (log_level); +#else rte_set_log_level (log_level); +#endif vm = vlib_get_main (); diff --git a/src/plugins/dpdk/hqos/hqos.c b/src/plugins/dpdk/hqos/hqos.c index ca1bdafa..2f2504d6 100644 --- a/src/plugins/dpdk/hqos/hqos.c +++ b/src/plugins/dpdk/hqos/hqos.c @@ -430,7 +430,11 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) pkts_enq_len += rte_ring_sc_dequeue_burst (swq, (void **) &pkts_enq[pkts_enq_len], +#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) + hqos->hqos_burst_enq, 0); +#else hqos->hqos_burst_enq); +#endif /* Get next SWQ for this device */ swq_pos++; @@ -521,7 +525,11 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) pkts_enq_len += rte_ring_sc_dequeue_burst (swq, (void **) &pkts_enq[pkts_enq_len], +#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) + hqos->hqos_burst_enq, 0); +#else hqos->hqos_burst_enq); +#endif /* Get next SWQ for this device */ swq_pos++; -- cgit 1.2.3-korg From 572825df79e27a8baebcfc11d91bdb6f9776c2c7 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 15 May 2017 12:32:15 +0200 Subject: dpdk: revert dpdk 17.05 change which causes virtio issues This patch is causing DPDK to provide bad MAC address for legacy virtio interfaces. Change-Id: I526cd35a38164ede80a8ab6decb9e0d1ebfad723 Signed-off-by: Damjan Marion --- dpdk/Makefile | 2 +- ...io-tx-with-can_push-when-VERSION_1-is-set.patch | 38 ------- .../0002-ethdev-fix-MAC-address-replay.patch | 83 -------------- .../0003-enic-fix-MAC-address-add-and-remove.patch | 122 --------------------- ...t-virtio-remove-redundant-MSI-X-detection.patch | 58 ++++++++++ 5 files changed, 59 insertions(+), 244 deletions(-) delete mode 100644 dpdk/dpdk-16.11_patches/0001-virtio-tx-with-can_push-when-VERSION_1-is-set.patch delete mode 100644 dpdk/dpdk-16.11_patches/0002-ethdev-fix-MAC-address-replay.patch delete mode 100644 dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch create mode 100644 dpdk/dpdk-17.05_patches/0001-Revert-net-virtio-remove-redundant-MSI-X-detection.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index cd79e394..0f75a58a 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -26,7 +26,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp1 +PKG_SUFFIX ?= vpp2 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) diff --git a/dpdk/dpdk-16.11_patches/0001-virtio-tx-with-can_push-when-VERSION_1-is-set.patch b/dpdk/dpdk-16.11_patches/0001-virtio-tx-with-can_push-when-VERSION_1-is-set.patch deleted file mode 100644 index c2693568..00000000 --- a/dpdk/dpdk-16.11_patches/0001-virtio-tx-with-can_push-when-VERSION_1-is-set.patch +++ /dev/null @@ -1,38 +0,0 @@ -From b002b56c0c8a1790549c23d93f3d57ffc212c6da Mon Sep 17 00:00:00 2001 -From: Pierre Pfister -Date: Tue, 8 Nov 2016 10:24:48 +0100 -Subject: [PATCH] virtio: tx with can_push when VERSION_1 is set - -Current virtio driver advertises VERSION_1 support, -but does not handle device's VERSION_1 support when -sending packets (it looks for ANY_LAYOUT feature, -which is absent). - -This patch enables 'can_push' in tx path when VERSION_1 -is advertised by the device. - -This significantly improves small packets forwarding rate -towards devices advertising VERSION_1 feature. 
- -Signed-off-by: Pierre Pfister ---- - drivers/net/virtio/virtio_rxtx.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c -index 22d97a4..1e5a6b9 100644 ---- a/drivers/net/virtio/virtio_rxtx.c -+++ b/drivers/net/virtio/virtio_rxtx.c -@@ -1015,7 +1015,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - } - - /* optimize ring usage */ -- if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) && -+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || -+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && - rte_mbuf_refcnt_read(txm) == 1 && - RTE_MBUF_DIRECT(txm) && - txm->nb_segs == 1 && --- -2.7.4 (Apple Git-66) - diff --git a/dpdk/dpdk-16.11_patches/0002-ethdev-fix-MAC-address-replay.patch b/dpdk/dpdk-16.11_patches/0002-ethdev-fix-MAC-address-replay.patch deleted file mode 100644 index 951694de..00000000 --- a/dpdk/dpdk-16.11_patches/0002-ethdev-fix-MAC-address-replay.patch +++ /dev/null @@ -1,83 +0,0 @@ -From fb7c10892b057533931553f9367acd5541a0537c Mon Sep 17 00:00:00 2001 -From: Steve Shin -Date: Mon, 30 Jan 2017 09:12:25 -0800 -Subject: [PATCH] This patch fixes a bug in replaying MAC address to the - hardware in rte_eth_dev_config_restore() routine. Added default MAC replay as - well. - -Fixes: 4bdefaade6d1 ("ethdev: VMDQ enhancements") - -Signed-off-by: Steve Shin ---- - lib/librte_ether/rte_ethdev.c | 48 ++++++++++++++++++++++++------------------- - 1 file changed, 27 insertions(+), 21 deletions(-) - -diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c -index fde8112..2c07dfe 100644 ---- a/lib/librte_ether/rte_ethdev.c -+++ b/lib/librte_ether/rte_ethdev.c -@@ -857,34 +857,40 @@ struct rte_eth_dev * - { - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; -- struct ether_addr addr; -+ struct ether_addr *addr; - uint16_t i; - uint32_t pool = 0; -+ uint64_t pool_mask; - - dev = &rte_eth_devices[port_id]; - - rte_eth_dev_info_get(port_id, &dev_info); - -- if (RTE_ETH_DEV_SRIOV(dev).active) -- pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; -- -- /* replay MAC address configuration */ -- for (i = 0; i < dev_info.max_mac_addrs; i++) { -- addr = dev->data->mac_addrs[i]; -- -- /* skip zero address */ -- if (is_zero_ether_addr(&addr)) -- continue; -- -- /* add address to the hardware */ -- if (*dev->dev_ops->mac_addr_add && -- (dev->data->mac_pool_sel[i] & (1ULL << pool))) -- (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool); -- else { -- RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", -- port_id); -- /* exit the loop but not return an error */ -- break; -+ /* replay MAC address configuration including default MAC */ -+ addr = &dev->data->mac_addrs[0]; -+ if (*dev->dev_ops->mac_addr_set != NULL) -+ (*dev->dev_ops->mac_addr_set)(dev, addr); -+ else if (*dev->dev_ops->mac_addr_add != NULL) -+ (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); -+ -+ if (*dev->dev_ops->mac_addr_add != NULL) { -+ for (i = 1; i < dev_info.max_mac_addrs; i++) { -+ addr = &dev->data->mac_addrs[i]; -+ -+ /* skip zero address */ -+ if (is_zero_ether_addr(addr)) -+ continue; -+ -+ pool = 0; -+ pool_mask = dev->data->mac_pool_sel[i]; -+ -+ do { -+ if (pool_mask & 1ULL) -+ (*dev->dev_ops->mac_addr_add)(dev, -+ addr, i, pool); -+ pool_mask >>= 1; -+ pool++; -+ } while (pool_mask); - } - } - --- -1.9.1 - diff --git a/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch b/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch deleted 
file mode 100644 index e2965676..00000000 --- a/dpdk/dpdk-16.11_patches/0003-enic-fix-MAC-address-add-and-remove.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 0cd0ed7b0b966704236e07fc1d3bd099deb407a7 Mon Sep 17 00:00:00 2001 -From: John Daley -Date: Tue, 31 Jan 2017 12:59:23 -0800 -Subject: [PATCH] The mac_addr_add callback function was simply replacing the - primary MAC address instead of adding new ones and the mac_addr_remove - callback would only remove the primary MAC form the adapter. Fix the - functions to add or remove new address. Allow up to 64 MAC addresses per - port. - -Signed-off-by: John Daley ---- - drivers/net/enic/enic.h | 5 +++-- - drivers/net/enic/enic_ethdev.c | 6 +++--- - drivers/net/enic/enic_main.c | 21 ++++++++------------- - 3 files changed, 14 insertions(+), 18 deletions(-) - -diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h -index 865cd76..5a807d4 100644 ---- a/drivers/net/enic/enic.h -+++ b/drivers/net/enic/enic.h -@@ -60,6 +60,7 @@ - #define ENIC_RQ_MAX 16 - #define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2)) - #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) -+#define ENIC_MAX_MAC_ADDR 64 - - #define VLAN_ETH_HLEN 18 - -@@ -277,8 +278,8 @@ extern void enic_dev_stats_get(struct enic *enic, - struct rte_eth_stats *r_stats); - extern void enic_dev_stats_clear(struct enic *enic); - extern void enic_add_packet_filter(struct enic *enic); --extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); --extern void enic_del_mac_address(struct enic *enic); -+void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); -+void enic_del_mac_address(struct enic *enic, int mac_index); - extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); - extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - struct rte_mbuf *tx_pkt, unsigned short len, -diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c -index 2b154ec..d2d04a9 100644 ---- a/drivers/net/enic/enic_ethdev.c -+++ b/drivers/net/enic/enic_ethdev.c -@@ -464,7 +464,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, - device_info->max_tx_queues = enic->conf_wq_count; - device_info->min_rx_bufsize = ENIC_MIN_MTU; - device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4; -- device_info->max_mac_addrs = 1; -+ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR; - device_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | -@@ -545,12 +545,12 @@ static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, - enic_set_mac_address(enic, mac_addr->addr_bytes); - } - --static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index) -+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) - { - struct enic *enic = pmd_priv(eth_dev); - - ENICPMD_FUNC_TRACE(); -- enic_del_mac_address(enic); -+ enic_del_mac_address(enic, index); - } - - static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) -diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c -index f0b15ac..21e8ede 100644 ---- a/drivers/net/enic/enic_main.c -+++ b/drivers/net/enic/enic_main.c -@@ -190,9 +190,12 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) - r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); - } - --void enic_del_mac_address(struct enic *enic) -+void enic_del_mac_address(struct enic *enic, int mac_index) - { -- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr)) -+ struct rte_eth_dev *eth_dev = enic->rte_dev; -+ 
uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes; -+ -+ if (vnic_dev_del_addr(enic->vdev, mac_addr)) - dev_err(enic, "del mac addr failed\n"); - } - -@@ -205,15 +208,6 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) - return; - } - -- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr); -- if (err) { -- dev_err(enic, "del mac addr failed\n"); -- return; -- } -- -- ether_addr_copy((struct ether_addr *)mac_addr, -- (struct ether_addr *)enic->mac_addr); -- - err = vnic_dev_add_addr(enic->vdev, mac_addr); - if (err) { - dev_err(enic, "add mac addr failed\n"); -@@ -1308,13 +1302,14 @@ static int enic_dev_init(struct enic *enic) - /* Get the supported filters */ - enic_fdir_info(enic); - -- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0); -+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN -+ * ENIC_MAX_MAC_ADDR, 0); - if (!eth_dev->data->mac_addrs) { - dev_err(enic, "mac addr storage alloc failed, aborting.\n"); - return -1; - } - ether_addr_copy((struct ether_addr *) enic->mac_addr, -- ð_dev->data->mac_addrs[0]); -+ eth_dev->data->mac_addrs); - - vnic_dev_set_reset_flag(enic->vdev, 0); - --- -1.9.1 - diff --git a/dpdk/dpdk-17.05_patches/0001-Revert-net-virtio-remove-redundant-MSI-X-detection.patch b/dpdk/dpdk-17.05_patches/0001-Revert-net-virtio-remove-redundant-MSI-X-detection.patch new file mode 100644 index 00000000..28dc68dd --- /dev/null +++ b/dpdk/dpdk-17.05_patches/0001-Revert-net-virtio-remove-redundant-MSI-X-detection.patch @@ -0,0 +1,58 @@ +From 3a1470e031ff33ac99da33b41dae0e9082d4da78 Mon Sep 17 00:00:00 2001 +From: Damjan Marion +Date: Mon, 15 May 2017 12:27:37 +0200 +Subject: [PATCH] Revert "net/virtio: remove redundant MSI-X detection" + +This reverts commit ee1843bd89076c59e50cadbef5c935613f543765. +--- + drivers/net/virtio/virtio_pci.c | 27 +++++++++++++++++++++++++++ + 1 file changed, 27 insertions(+) + +diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c +index b7b3d6157..127f25791 100644 +--- a/drivers/net/virtio/virtio_pci.c ++++ b/drivers/net/virtio/virtio_pci.c +@@ -274,6 +274,32 @@ legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) + VIRTIO_PCI_QUEUE_NOTIFY); + } + ++#ifdef RTE_EXEC_ENV_LINUXAPP ++static int ++legacy_virtio_has_msix(const struct rte_pci_addr *loc) ++{ ++ DIR *d; ++ char dirname[PATH_MAX]; ++ ++ snprintf(dirname, sizeof(dirname), ++ "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(), ++ loc->domain, loc->bus, loc->devid, loc->function); ++ ++ d = opendir(dirname); ++ if (d) ++ closedir(d); ++ ++ return d != NULL; ++} ++#else ++static int ++legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused) ++{ ++ /* nic_uio does not enable interrupts, return 0 (false). */ ++ return 0; ++} ++#endif ++ + const struct virtio_pci_ops legacy_ops = { + .read_dev_cfg = legacy_read_dev_config, + .write_dev_cfg = legacy_write_dev_config, +@@ -694,6 +720,7 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw) + } + + virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops; ++ hw->use_msix = legacy_virtio_has_msix(&dev->addr); + hw->modern = 0; + + return 0; +-- +2.11.0 + -- cgit 1.2.3-korg From 9ec92d38bb5c2ec05f7698f58195c81f14120fb4 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 17 May 2017 16:07:32 +0200 Subject: dpdk: disable 16-bit descriptors for X710/XL710 This fixes issue with rx packet drops on VF. 
Change-Id: I8c1a35213013f8856b71e7204496f463319cbe28 Signed-off-by: Damjan Marion --- dpdk/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 0f75a58a..c6968d7c 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -26,7 +26,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp2 +PKG_SUFFIX ?= vpp3 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -125,7 +125,6 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_PKTMBUF_HEADROOM,$(DPDK_PKTMBUF_HEADROOM)) $(call set,RTE_LIBEAL_USE_HPET,y) $(call set,RTE_BUILD_COMBINE_LIBS,y) - $(call set,RTE_LIBRTE_I40E_16BYTE_RX_DESC,y) $(call set,RTE_PCI_CONFIG,y) $(call set,RTE_PCI_EXTENDED_TAG,"on") $(call set,RTE_PCI_MAX_READ_REQUEST_SIZE,4096) -- cgit 1.2.3-korg From 0e2e10b77d63196bfb93ae5be1251bbc1a1b561a Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Wed, 22 Mar 2017 15:22:14 +0000 Subject: dpdk: build sw cryptodev support with make verify Change-Id: Ica95b5d3d44563c93c89b2a3233171c3aa1f048d Signed-off-by: Sergio Gonzalez Monroy --- Makefile | 12 +++++++----- dpdk/Makefile | 13 +++++++++---- 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index 946a8583..f78549f4 100644 --- a/Makefile +++ b/Makefile @@ -427,22 +427,24 @@ define banner @echo " " endef -verify: install-dep $(BR)/.bootstrap.ok dpdk-install-dev +verify: install-dep $(BR)/.bootstrap.ok + make -C dpdk install-$(PKG) DPDK_CRYPTO_SW_PMD=y $(call banner,"Building for PLATFORM=vpp using gcc") - @make -C build-root PLATFORM=vpp TAG=vpp wipe-all install-packages + @make -C build-root PLATFORM=vpp TAG=vpp wipe-all install-packages \ + vpp_uses_dpdk_cryptodev_sw=yes ifeq ($(OS_ID)-$(OS_VERSION_ID),ubuntu-16.04) $(call banner,"Installing dependencies") @sudo -E apt-get update @sudo -E apt-get $(CONFIRM) $(FORCE) install clang $(call banner,"Building for PLATFORM=vpp using clang") - @make -C build-root CC=clang PLATFORM=vpp TAG=vpp_clang wipe-all install-packages + @make -C build-root CC=clang PLATFORM=vpp TAG=vpp_clang \ + wipe-all install-packages vpp_uses_dpdk_cryptodev_sw=yes endif $(call banner,"Building sample-plugin") @make -C build-root PLATFORM=vpp TAG=vpp sample-plugin-install $(call banner,"Building $(PKG) packages") - @make pkg-$(PKG) + @make pkg-$(PKG) vpp_uses_dpdk_cryptodev_sw=yes ifeq ($(OS_ID)-$(OS_VERSION_ID),ubuntu-16.04) @make test endif - diff --git a/dpdk/Makefile b/dpdk/Makefile index c6968d7c..495b4919 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -26,7 +26,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp3 +PKG_SUFFIX ?= vpp4 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -41,6 +41,7 @@ AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44-gcm.2 ISA_L_CRYPTO_LIB_TARBALL := isa_l_crypto.tar.gz ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/master.tar.gz ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-master +ISA_L_CRYPTO_INSTALL_DIR := $(ISA_L_CRYPTO_LIB_SOURCE)/install endif ifneq (,$(findstring clang,$(CC))) @@ -90,8 +91,8 @@ DPDK_EXTRA_CFLAGS := -g -O0 endif ifeq ($(DPDK_CRYPTO_SW_PMD),y) -DPDK_EXTRA_CFLAGS += -I$(I)/include -DPDK_EXTRA_LDFLAGS += -L$(I)/lib +DPDK_EXTRA_CFLAGS += 
-I$(ISA_L_CRYPTO_INSTALL_DIR)/include +DPDK_EXTRA_LDFLAGS += -L$(ISA_L_CRYPTO_INSTALL_DIR)/lib DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) endif @@ -167,12 +168,14 @@ $(CURDIR)/$(AESNIMB_LIB_TARBALL): then cp $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) $(CURDIR) ; \ else curl -o $@ -LO $(AESNIMB_LIB_TARBALL_URL) ; \ fi + @rm -f $(B)/.download.ok $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): @if [ -e $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) ] ; \ then cp $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) $(CURDIR) ; \ else curl -o $@ -LO $(ISA_L_CRYPTO_LIB_TARBALL_URL) ; \ fi + @rm -f $(B)/.download.ok DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) ifeq ($(DPDK_CRYPTO_SW_PMD),y) @@ -233,8 +236,10 @@ ifeq ($(DPDK_CRYPTO_SW_PMD),y) make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ # Build ISA-L Crypto library - cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure --prefix=$(I) + cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && \ + ./configure --prefix=$(ISA_L_CRYPTO_INSTALL_DIR) CFLAGS='-fPIC -DPIC -O2' make -C $(ISA_L_CRYPTO_LIB_SOURCE) -j install + cp $(ISA_L_CRYPTO_INSTALL_DIR)/lib/libisal_crypto.* $(I)/lib/ endif @make $(DPDK_MAKE_ARGS) install @touch $@ -- cgit 1.2.3-korg From ad625f55db7f63eaae9835452727445649836414 Mon Sep 17 00:00:00 2001 From: Peter Mikus Date: Wed, 31 May 2017 15:23:59 +0000 Subject: Revert "dpdk: build sw cryptodev support with make verify" This reverts commit 0e2e10b77d63196bfb93ae5be1251bbc1a1b561a. Change-Id: I3c1737f391b6ed127f92416f06449216e79859bb Signed-off-by: Peter Mikus --- Makefile | 12 +++++------- dpdk/Makefile | 13 ++++--------- 2 files changed, 9 insertions(+), 16 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index f78549f4..946a8583 100644 --- a/Makefile +++ b/Makefile @@ -427,24 +427,22 @@ define banner @echo " " endef -verify: install-dep $(BR)/.bootstrap.ok - make -C dpdk install-$(PKG) DPDK_CRYPTO_SW_PMD=y +verify: install-dep $(BR)/.bootstrap.ok dpdk-install-dev $(call banner,"Building for PLATFORM=vpp using gcc") - @make -C build-root PLATFORM=vpp TAG=vpp wipe-all install-packages \ - vpp_uses_dpdk_cryptodev_sw=yes + @make -C build-root PLATFORM=vpp TAG=vpp wipe-all install-packages ifeq ($(OS_ID)-$(OS_VERSION_ID),ubuntu-16.04) $(call banner,"Installing dependencies") @sudo -E apt-get update @sudo -E apt-get $(CONFIRM) $(FORCE) install clang $(call banner,"Building for PLATFORM=vpp using clang") - @make -C build-root CC=clang PLATFORM=vpp TAG=vpp_clang \ - wipe-all install-packages vpp_uses_dpdk_cryptodev_sw=yes + @make -C build-root CC=clang PLATFORM=vpp TAG=vpp_clang wipe-all install-packages endif $(call banner,"Building sample-plugin") @make -C build-root PLATFORM=vpp TAG=vpp sample-plugin-install $(call banner,"Building $(PKG) packages") - @make pkg-$(PKG) vpp_uses_dpdk_cryptodev_sw=yes + @make pkg-$(PKG) ifeq ($(OS_ID)-$(OS_VERSION_ID),ubuntu-16.04) @make test endif + diff --git a/dpdk/Makefile b/dpdk/Makefile index 495b4919..d781ed5d 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -26,7 +26,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp4 +PKG_SUFFIX ?= vpp5 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -41,7 +41,6 @@ AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44-gcm.2 ISA_L_CRYPTO_LIB_TARBALL := isa_l_crypto.tar.gz ISA_L_CRYPTO_LIB_TARBALL_URL := 
http://github.com/01org/isa-l_crypto/archive/master.tar.gz ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-master -ISA_L_CRYPTO_INSTALL_DIR := $(ISA_L_CRYPTO_LIB_SOURCE)/install endif ifneq (,$(findstring clang,$(CC))) @@ -91,8 +90,8 @@ DPDK_EXTRA_CFLAGS := -g -O0 endif ifeq ($(DPDK_CRYPTO_SW_PMD),y) -DPDK_EXTRA_CFLAGS += -I$(ISA_L_CRYPTO_INSTALL_DIR)/include -DPDK_EXTRA_LDFLAGS += -L$(ISA_L_CRYPTO_INSTALL_DIR)/lib +DPDK_EXTRA_CFLAGS += -I$(I)/include +DPDK_EXTRA_LDFLAGS += -L$(I)/lib DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) endif @@ -168,14 +167,12 @@ $(CURDIR)/$(AESNIMB_LIB_TARBALL): then cp $(DPDK_DOWNLOAD_DIR)/$(AESNIMB_LIB_TARBALL) $(CURDIR) ; \ else curl -o $@ -LO $(AESNIMB_LIB_TARBALL_URL) ; \ fi - @rm -f $(B)/.download.ok $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): @if [ -e $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) ] ; \ then cp $(DPDK_DOWNLOAD_DIR)/$(ISA_L_CRYPTO_LIB_TARBALL) $(CURDIR) ; \ else curl -o $@ -LO $(ISA_L_CRYPTO_LIB_TARBALL_URL) ; \ fi - @rm -f $(B)/.download.ok DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) ifeq ($(DPDK_CRYPTO_SW_PMD),y) @@ -236,10 +233,8 @@ ifeq ($(DPDK_CRYPTO_SW_PMD),y) make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ # Build ISA-L Crypto library - cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && \ - ./configure --prefix=$(ISA_L_CRYPTO_INSTALL_DIR) CFLAGS='-fPIC -DPIC -O2' + cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure --prefix=$(I) make -C $(ISA_L_CRYPTO_LIB_SOURCE) -j install - cp $(ISA_L_CRYPTO_INSTALL_DIR)/lib/libisal_crypto.* $(I)/lib/ endif @make $(DPDK_MAKE_ARGS) install @touch $@ -- cgit 1.2.3-korg From b2861e8fb6855e9924887e5743d65ebbfbc6d7d2 Mon Sep 17 00:00:00 2001 From: Chris Luke Date: Wed, 14 Jun 2017 11:24:41 -0400 Subject: make: Fix parallel building with some container platforms (VPP-880) With some Linux container platforms /proc/cpuinfo reads as an empty file. (Aside: stat on /proc/cpuinfo always indicates a length of zero bytes, regardless of its content). This has the effect that the make '-j' parameter being passed the unhelpful value of '0' both in build-root/Makefile and dpdk/Makefile. Make complains with the error: make: the '-j' option requires a positive integer argument This patch checks for '0' and replaces it with '2' as a reasonable number of jobs to run in parallel when the CPU count isn't known (and assumed to be one). It also makes the value determination consistent between VPP and DPDK (2*ncpu). Change-Id: I78b89420114a825fab4d339e4f9291d486b7b9c8 Signed-off-by: Chris Luke --- build-root/Makefile | 17 +++++++---------- dpdk/Makefile | 8 +++++++- 2 files changed, 14 insertions(+), 11 deletions(-) (limited to 'dpdk/Makefile') diff --git a/build-root/Makefile b/build-root/Makefile index f2f77804..0fed520c 100644 --- a/build-root/Makefile +++ b/build-root/Makefile @@ -653,16 +653,13 @@ configure_check_timestamp = \ # Package build ###################################################################### -linux_n_cpus = `grep '^processor' /proc/cpuinfo | wc -l` - -MAKE_PARALLEL_JOBS = \ - -j $(shell \ - if [ -f /proc/cpuinfo ] ; then \ - expr 2 '*' $(linux_n_cpus) ; \ - else \ - echo 1 ; \ - fi) - +# /proc/cpuinfo does not exist on platforms without a /proc and on some +# platforms, notably inside containers, it has no content. In those cases +# we assume there's 1 processor; we use 2*ncpu for the -j option. 
+# NB: GNU Make 4.2 will let us use '$(file Date: Tue, 6 Jun 2017 15:29:16 +0100 Subject: dpdk: update build Current optional DPDK PMDs are: - AESNI MB PMD (SW crypto) - AESNI GCM PMD (SW crypto) - MLX4 PMD - MLX5 PMD This change will always build DPDK SW crypto PMDs and required SW crypto libraries, while MLX PMDs are still optional and the user has to build required libraries. Now the configure script detects if any of the optional DPDK PMDs were built and link against their required libraries/dependencies. Change-Id: I1560bebd71035d6486483f22da90042ec2ce40a1 Signed-off-by: Sergio Gonzalez Monroy --- dpdk/Makefile | 59 ++++++++++++++++++++++++++++------------------------- src/configure.ac | 48 ++++++++++++++++++++++++++++++++++++++++--- src/plugins/dpdk.am | 7 ++++++- 3 files changed, 82 insertions(+), 32 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 06ba1270..659439c0 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -19,14 +19,13 @@ DPDK_INSTALL_DIR ?= $(CURDIR)/_install DPDK_PKTMBUF_HEADROOM ?= 128 DPDK_DOWNLOAD_DIR ?= $(HOME)/Downloads DPDK_DEBUG ?= n -DPDK_CRYPTO_SW_PMD ?= n DPDK_MLX4_PMD ?= n DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp5 +PKG_SUFFIX ?= vpp6 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) @@ -34,14 +33,15 @@ DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) -ifeq ($(DPDK_CRYPTO_SW_PMD),y) -AESNIMB_LIB_TARBALL := v0.44-gcm.2.tar.gz +IPSEC_MB_VER := 0.45 +AESNIMB_LIB_TARBALL := v$(IPSEC_MB_VER).tar.gz AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) -AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-0.44-gcm.2 -ISA_L_CRYPTO_LIB_TARBALL := isa_l_crypto.tar.gz -ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/master.tar.gz -ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-master -endif +AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-$(IPSEC_MB_VER) +ISA_L_CRYPTO_VER := 2.18.0 +ISA_L_CRYPTO_LIB_TARBALL := v$(ISA_L_CRYPTO_VER).tar.gz +ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/$(ISA_L_CRYPTO_LIB_TARBALL) +ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-$(ISA_L_CRYPTO_VER) +ISA_L_CRYPTO_INSTALL_DIR := $(ISA_L_CRYPTO_LIB_SOURCE)/install ifneq (,$(findstring clang,$(CC))) DPDK_CC=clang @@ -95,11 +95,9 @@ else DPDK_EXTRA_CFLAGS := -g -O0 endif -ifeq ($(DPDK_CRYPTO_SW_PMD),y) -DPDK_EXTRA_CFLAGS += -I$(I)/include +DPDK_EXTRA_CFLAGS += -I$(ISA_L_CRYPTO_INSTALL_DIR)/include -Wl,-z,muldefs DPDK_EXTRA_LDFLAGS += -L$(I)/lib DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) -endif # assemble DPDK make arguments DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ @@ -111,8 +109,6 @@ DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \ DESTDIR=$(I) \ $(DPDK_MAKE_EXTRA_ARGS) -DPDK_SOURCE_FILES := $(shell [ -e $(DPDK_SOURCE) ] && find $(DPDK_SOURCE) -name "*.[chS]") - define set @if grep -q CONFIG_$1 $@ ; \ then sed -i -e 's/.*\(CONFIG_$1=\).*/\1$2/' $@ ; \ @@ -143,8 +139,8 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_PMD_BOND,y) $(call set,RTE_LIBRTE_IP_FRAG,y) $(call set,RTE_LIBRTE_PMD_QAT,y) - $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(DPDK_CRYPTO_SW_PMD)) - $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(DPDK_CRYPTO_SW_PMD)) + $(call 
set,RTE_LIBRTE_PMD_AESNI_MB,y) + $(call set,RTE_LIBRTE_PMD_AESNI_GCM,y) $(call set,RTE_LIBRTE_MLX4_PMD,$(DPDK_MLX4_PMD)) $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed @@ -181,10 +177,8 @@ $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): fi DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) -ifeq ($(DPDK_CRYPTO_SW_PMD),y) DPDK_DOWNLOADS += $(CURDIR)/$(AESNIMB_LIB_TARBALL) DPDK_DOWNLOADS += $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) -endif $(B)/.download.ok: $(DPDK_DOWNLOADS) @mkdir -p $(B) @@ -200,12 +194,10 @@ download: $(B)/.download.ok $(B)/.extract.ok: $(B)/.download.ok @echo --- extracting $(DPDK_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) -ifeq ($(DPDK_CRYPTO_SW_PMD),y) @echo --- extracting $(AESNIMB_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL) @echo --- extracting $(ISA_L_CRYPTO_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) -endif @touch $@ .PHONY: extract @@ -231,18 +223,29 @@ $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config .PHONY: config config: $(B)/.config.ok -$(B)/.build.ok: $(DPDK_SOURCE_FILES) - @if [ ! -e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi -ifeq ($(DPDK_CRYPTO_SW_PMD),y) - # Build IPsec_MB library +# Order matters +BUILD_TARGETS += build-ipsec-mb build-isal-crypto build-dpdk + +.PHONY: build-ipsec-mb +build-ipsec-mb: mkdir -p $(I)/lib/ make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ - # Build ISA-L Crypto library - cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && ./configure --prefix=$(I) + +.PHONY: build-isal-crypto +build-isal-crypto: + mkdir -p $(I)/lib/ + cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && \ + ./configure --prefix=$(ISA_L_CRYPTO_INSTALL_DIR) CFLAGS='-fPIC -DPIC -O2' make -C $(ISA_L_CRYPTO_LIB_SOURCE) -j install -endif + cp $(ISA_L_CRYPTO_INSTALL_DIR)/lib/libisal_crypto.a $(I)/lib/ + +.PHONY: build-dpdk +build-dpdk: + @if [ ! 
-e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi @make $(DPDK_MAKE_ARGS) install + +$(B)/.build.ok: $(BUILD_TARGETS) @touch $@ .PHONY: build @@ -323,7 +326,7 @@ build-rpm: $(DEV_RPM) install-rpm: ifneq ($(INSTALLED_RPM_VER),$(DPDK_VERSION)-$(PKG_SUFFIX)) - @make $(DEV_RPM) + @$(MAKE) $(DEV_RPM) sudo rpm -Uih $(DEV_RPM) else @echo "==========================================================" diff --git a/src/configure.ac b/src/configure.ac index ef8c3b00..cb00d0bd 100644 --- a/src/configure.ac +++ b/src/configure.ac @@ -80,6 +80,23 @@ AC_DEFUN([PLUGIN_DISABLED], AC_DEFUN([PRINT_VAL], [ AC_MSG_RESULT(AC_HELP_STRING($1,$2)) ]) +AC_DEFUN([DPDK_IS_PMD_ENABLED], +[ + AC_MSG_CHECKING([for $1 in rte_config.h]) + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[#include ]], + [[return RTE_$1;]], + )], + [with_$2=yes] + [AC_MSG_RESULT([yes])], + [with_$2=no] + [AC_MSG_RESULT([no])] + ) + AM_CONDITIONAL(m4_toupper(WITH_$2), test "$with_$2" = "yes") + m4_append_uniq([list_of_with], [$2], [, ]) +]) + ############################################################################### # configure arguments ############################################################################### @@ -97,8 +114,6 @@ DISABLE_ARG(papi, [Disable Python API bindings]) DISABLE_ARG(japi, [Disable Java API bindings]) # --with-X -WITH_ARG(dpdk_crypto_sw,[Use DPDK cryptodev SW PMDs]) -WITH_ARG(dpdk_mlx5_pmd, [Use DPDK with mlx5 PMD]) # --without-X WITHOUT_ARG(libssl, [Disable libssl]) @@ -130,7 +145,6 @@ AC_SUBST(PRE_DATA_SIZE, [$with_pre_data]) AC_SUBST(APICLI, [-DVPP_API_TEST_BUILTIN=${n_with_apicli}]) AC_DEFINE_UNQUOTED(DPDK_SHARED_LIB, [${n_enable_dpdk_shared}]) -AC_DEFINE_UNQUOTED(DPDK_CRYPTO_SW, [${n_with_dpdk_crypto_sw}]) AC_DEFINE_UNQUOTED(WITH_LIBSSL, [${n_with_libssl}]) @@ -170,6 +184,34 @@ AM_COND_IF([ENABLE_DPDK_SHARED], [AC_MSG_ERROR([DPDK shared library not found])],) ]) +DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_MB, dpdk_aesni_mb_pmd) +AM_COND_IF([WITH_DPDK_AESNI_MB_PMD], +[ + AC_CHECK_LIB([IPSec_MB], [submit_job_sse], [], + [AC_MSG_ERROR([IPSec_MB library not found])]) +]) + +DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_GCM, dpdk_aesni_gcm_pmd) +AM_COND_IF([WITH_DPDK_AESNI_GCM_PMD], +[ + AC_CHECK_LIB([isal_crypto], [aesni_gcm128_init], [], + [AC_MSG_ERROR([isal_crypto library not found])]) +]) + +DPDK_IS_PMD_ENABLED(LIBRTE_MLX5_PMD, dpdk_mlx5_pmd) +AM_COND_IF([WITH_DPDK_MLX5_PMD], +[ + AC_CHECK_LIB([ibverbs], [ibv_fork_init], [], + [AC_MSG_ERROR([ibverbs library not found])]) +]) + +DPDK_IS_PMD_ENABLED(LIBRTE_MLX4_PMD, dpdk_mlx4_pmd) +AM_COND_IF([WITH_DPDK_MLX4_PMD], +[ + AC_CHECK_LIB([ibverbs], [ibv_fork_init], [], + [AC_MSG_ERROR([ibverbs library not found])]) +]) + AM_COND_IF([ENABLE_G2], [ PKG_CHECK_MODULES(g2, gtk+-2.0) diff --git a/src/plugins/dpdk.am b/src/plugins/dpdk.am index 75bfb967..3a1ffeeb 100644 --- a/src/plugins/dpdk.am +++ b/src/plugins/dpdk.am @@ -19,14 +19,19 @@ dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -ldpdk else dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive endif -if WITH_DPDK_CRYPTO_SW +if WITH_DPDK_AESNI_MB_PMD dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a +endif +if WITH_DPDK_AESNI_GCM_PMD dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libisal_crypto.a,-l:libisal_crypto.a endif dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl if WITH_DPDK_MLX5_PMD dpdk_plugin_la_LDFLAGS += -Wl,-libverbs endif +if WITH_DPDK_MLX4_PMD +dpdk_plugin_la_LDFLAGS += -Wl,-libverbs +endif dpdk_plugin_la_SOURCES = \ dpdk/main.c \ -- cgit 
1.2.3-korg From 618f7b003ea92c01bbb892b937a5ce5417603d6b Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Wed, 19 Jul 2017 16:22:28 +0100 Subject: dpdk: only build SW crypto for x86_64 platforms Change-Id: If559747ad59c82c81d15734f27e15548eca0962b Signed-off-by: Sergio Gonzalez Monroy --- dpdk/Makefile | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 659439c0..be1d628a 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -53,6 +53,12 @@ endif MACHINE=$(shell uname -m) +ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) +AESNI := y +else +AESNI := n +endif + ############################################################################## # Intel x86 ############################################################################## @@ -60,7 +66,6 @@ ifeq ($(MACHINE),$(filter $(MACHINE),x86_64 i686)) DPDK_TARGET ?= $(MACHINE)-native-linuxapp-$(DPDK_CC) DPDK_MACHINE ?= nhm DPDK_TUNE ?= core-avx2 - ############################################################################## # Cavium ThunderX ############################################################################## @@ -139,8 +144,8 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_PMD_BOND,y) $(call set,RTE_LIBRTE_IP_FRAG,y) $(call set,RTE_LIBRTE_PMD_QAT,y) - $(call set,RTE_LIBRTE_PMD_AESNI_MB,y) - $(call set,RTE_LIBRTE_PMD_AESNI_GCM,y) + $(call set,RTE_LIBRTE_PMD_AESNI_MB,$(AESNI)) + $(call set,RTE_LIBRTE_PMD_AESNI_GCM,$(AESNI)) $(call set,RTE_LIBRTE_MLX4_PMD,$(DPDK_MLX4_PMD)) $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed @@ -177,8 +182,10 @@ $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): fi DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) +ifeq ($(AESNI),y) DPDK_DOWNLOADS += $(CURDIR)/$(AESNIMB_LIB_TARBALL) DPDK_DOWNLOADS += $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) +endif $(B)/.download.ok: $(DPDK_DOWNLOADS) @mkdir -p $(B) -- cgit 1.2.3-korg From 3a079ce28204c986f16e99fbaaa685bdb3b7f752 Mon Sep 17 00:00:00 2001 From: Marco Varlese Date: Mon, 14 Aug 2017 10:53:28 +0200 Subject: Added MD5SUM for DPDK 17.08 tarball as a first step towards migration Change-Id: Ic73b857c4e3d5a3f695e93924de5a5bed0af5019 Signed-off-by: Marco Varlese --- dpdk/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index be1d628a..cd70c894 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -31,6 +31,7 @@ DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 +DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) IPSEC_MB_VER := 0.45 -- cgit 1.2.3-korg From 4aef5b78e71feb63ff883395c041edf48d8b516f Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Mon, 14 Aug 2017 09:26:44 +0100 Subject: dpdk: force libdir for isa-l crypto library Depending on the OS, the default libdir might change. 
RHEL/Ubuntu: libdir={exec_prefix}/lib OpenSUSE: libdir={exec_prefix}/lib64 Change-Id: I5f1672e5815ad821e6ac5fff95de5232ab735b67 Signed-off-by: Sergio Gonzalez Monroy --- dpdk/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index cd70c894..2f5049e5 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -244,7 +244,8 @@ build-ipsec-mb: build-isal-crypto: mkdir -p $(I)/lib/ cd $(ISA_L_CRYPTO_LIB_SOURCE) && ./autogen.sh && \ - ./configure --prefix=$(ISA_L_CRYPTO_INSTALL_DIR) CFLAGS='-fPIC -DPIC -O2' + ./configure --prefix=$(ISA_L_CRYPTO_INSTALL_DIR) \ + --libdir=$(ISA_L_CRYPTO_INSTALL_DIR)/lib CFLAGS='-fPIC -DPIC -O2' make -C $(ISA_L_CRYPTO_LIB_SOURCE) -j install cp $(ISA_L_CRYPTO_INSTALL_DIR)/lib/libisal_crypto.a $(I)/lib/ -- cgit 1.2.3-korg From 8ddd518f9a65af77dc3294f1965452aa72cb0eb2 Mon Sep 17 00:00:00 2001 From: Marco Varlese Date: Mon, 14 Aug 2017 16:07:00 +0200 Subject: Previous version was still downloading, unpacking and building IPSEC / AES libraries. This patch addresses the misbehaviour. Change-Id: I41f1ece3ca21c5a8f2c95533ed3d77a535233ea6 Signed-off-by: Marco Varlese --- dpdk/Makefile | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 2f5049e5..5e947b30 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -34,6 +34,12 @@ DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) +ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) +AESNI := y +else +AESNI := n +endif + IPSEC_MB_VER := 0.45 AESNIMB_LIB_TARBALL := v$(IPSEC_MB_VER).tar.gz AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) @@ -54,12 +60,6 @@ endif MACHINE=$(shell uname -m) -ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) -AESNI := y -else -AESNI := n -endif - ############################################################################## # Intel x86 ############################################################################## @@ -202,11 +202,13 @@ download: $(B)/.download.ok $(B)/.extract.ok: $(B)/.download.ok @echo --- extracting $(DPDK_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(DPDK_TARBALL) +ifeq ($(AESNI),y) @echo --- extracting $(AESNIMB_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL) @echo --- extracting $(ISA_L_CRYPTO_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) @touch $@ +endif .PHONY: extract extract: $(B)/.extract.ok @@ -232,7 +234,11 @@ $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config config: $(B)/.config.ok # Order matters +ifeq ($(AESNI),y) BUILD_TARGETS += build-ipsec-mb build-isal-crypto build-dpdk +else +BUILD_TARGETS += build-dpdk +endif .PHONY: build-ipsec-mb build-ipsec-mb: -- cgit 1.2.3-korg From 844e0eba7708f4f39d17436d2b352392ab75e4cf Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 21 Aug 2017 22:27:04 +0200 Subject: dpdk: disable tun/tap PMD Beside the fact that we don't need it, it fails to build on ARM64. 
Change-Id: Iefae8bf234b588d8005df5e053b9152b6611929c Signed-off-by: Damjan Marion --- dpdk/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 5e947b30..9f03b710 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -150,6 +150,7 @@ $(B)/custom-config: $(B)/.patch.ok Makefile $(call set,RTE_LIBRTE_MLX4_PMD,$(DPDK_MLX4_PMD)) $(call set,RTE_LIBRTE_MLX5_PMD,$(DPDK_MLX5_PMD)) @# not needed + $(call set,RTE_LIBRTE_PMD_TAP,n) $(call set,RTE_LIBRTE_TIMER,n) $(call set,RTE_LIBRTE_CFGFILE,n) $(call set,RTE_LIBRTE_LPM,n) -- cgit 1.2.3-korg From 5f22f4ddded8ac41487dab3069ff8d77c3916205 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 21 Aug 2017 22:19:04 +0200 Subject: dpdk: define MACHINE before it is used This fixes build on non-x86 platforms like arm64. Change-Id: I7ff5df92f89e34c27889d82f35924dc28cde8c39 Signed-off-by: Damjan Marion --- dpdk/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 9f03b710..2e4b0e96 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -33,6 +33,7 @@ DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) +MACHINE=$(shell uname -m) ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) AESNI := y @@ -58,8 +59,6 @@ else DPDK_CC=gcc endif -MACHINE=$(shell uname -m) - ############################################################################## # Intel x86 ############################################################################## -- cgit 1.2.3-korg From acdc306093aaea2633cf765307d6cb7c1b80081c Mon Sep 17 00:00:00 2001 From: Sergio Gonzalez Monroy Date: Thu, 24 Aug 2017 14:09:17 +0100 Subject: dpdk: required changes for 17.08 DPDK 17.08 breaks ethdev and cryptodev APIs. Address those changes while keeping backwards compatibility for DPDK 17.02 and 17.05. 
Change-Id: Idd6ac264d0d047fe586c41d4c4ca74e8fc778a54 Signed-off-by: Sergio Gonzalez Monroy --- Makefile | 6 +- dpdk/Makefile | 47 +++++--- src/configure.ac | 70 ++++++++++-- src/plugins/dpdk.am | 16 ++- src/plugins/dpdk/device/common.c | 28 ++++- src/plugins/dpdk/device/dpdk.h | 7 ++ src/plugins/dpdk/ipsec/cli.c | 15 +++ src/plugins/dpdk/ipsec/esp.h | 212 ++++++++++++++++++++++++++++++----- src/plugins/dpdk/ipsec/esp_decrypt.c | 140 ++++++++++------------- src/plugins/dpdk/ipsec/esp_encrypt.c | 106 +++++++----------- src/plugins/dpdk/ipsec/ipsec.c | 130 +++++++++++++++++---- src/plugins/dpdk/ipsec/ipsec.h | 33 ++++-- 12 files changed, 567 insertions(+), 243 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index c1a7cbb5..6ac6f6e7 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,7 @@ endif DEB_DEPENDS = curl build-essential autoconf automake bison libssl-dev ccache DEB_DEPENDS += debhelper dkms git libtool libapr1-dev dh-systemd DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config -DEB_DEPENDS += lcov chrpath autoconf nasm indent +DEB_DEPENDS += lcov chrpath autoconf nasm indent libnuma-dev DEB_DEPENDS += python-all python-dev python-virtualenv python-pip libffi6 ifeq ($(OS_VERSION_ID),14.04) DEB_DEPENDS += openjdk-8-jdk-headless @@ -73,7 +73,7 @@ endif RPM_DEPENDS = redhat-lsb glibc-static java-1.8.0-openjdk-devel yum-utils RPM_DEPENDS += openssl-devel https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm apr-devel -RPM_DEPENDS += python-devel +RPM_DEPENDS += python-devel numactl-devel ifeq ($(OS_ID)-$(OS_VERSION_ID),fedora-25) RPM_DEPENDS += python2-virtualenv RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries' @@ -99,7 +99,7 @@ endif RPM_SUSE_DEPENDS = autoconf automake bison ccache chrpath distribution-release gcc6 glibc-devel-static RPM_SUSE_DEPENDS += java-1_8_0-openjdk-devel libopenssl-devel libtool lsb-release make openssl-devel -RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm +RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm numactl-devel ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),) STARTUP_CONF ?= $(STARTUP_DIR)/startup.conf diff --git a/dpdk/Makefile b/dpdk/Makefile index 2e4b0e96..8d5b42ef 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -35,17 +35,27 @@ DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) MACHINE=$(shell uname -m) +AESNI ?= n +ISA_L_CRYPTO_LIB := n + +IPSEC_MB_VER ?= 0.46 +ISA_L_CRYPTO_VER := 2.18.0 + ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) -AESNI := y -else -AESNI := n +AESNI = y +# DPDK pre 17.08 depends on ISA-L Crypto library for GCM PMD + ifneq ($(firstword $(sort $(DPDK_VERSION), 17.08)), 17.08) + ISA_L_CRYPTO_LIB = y + IPSEC_MB_VER = 0.45 + $(info Building ISA-L Crypto $(ISA_L_CRYPTO_VER) library) + endif +$(info Building IPSec-MB $(IPSEC_MB_VER) library) endif -IPSEC_MB_VER := 0.45 AESNIMB_LIB_TARBALL := v$(IPSEC_MB_VER).tar.gz AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL) AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-$(IPSEC_MB_VER) -ISA_L_CRYPTO_VER := 2.18.0 + ISA_L_CRYPTO_LIB_TARBALL := v$(ISA_L_CRYPTO_VER).tar.gz ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/$(ISA_L_CRYPTO_LIB_TARBALL) ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-$(ISA_L_CRYPTO_VER) @@ -100,8 +110,10 @@ else DPDK_EXTRA_CFLAGS := -g -O0 endif +ifeq ($(ISA_L_CRYPTO_LIB),y) DPDK_EXTRA_CFLAGS += 
-I$(ISA_L_CRYPTO_INSTALL_DIR)/include -Wl,-z,muldefs DPDK_EXTRA_LDFLAGS += -L$(I)/lib +endif DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE) # assemble DPDK make arguments @@ -185,6 +197,8 @@ $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL): DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL) ifeq ($(AESNI),y) DPDK_DOWNLOADS += $(CURDIR)/$(AESNIMB_LIB_TARBALL) +endif +ifeq ($(ISA_L_CRYPTO_LIB),y) DPDK_DOWNLOADS += $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) endif @@ -205,10 +219,12 @@ $(B)/.extract.ok: $(B)/.download.ok ifeq ($(AESNI),y) @echo --- extracting $(AESNIMB_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL) +endif +ifeq ($(ISA_L_CRYPTO_LIB),y) @echo --- extracting $(ISA_L_CRYPTO_LIB_TARBALL) --- @tar --directory $(B) --extract --file $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL) - @touch $@ endif + @touch $@ .PHONY: extract extract: $(B)/.extract.ok @@ -233,17 +249,11 @@ $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config .PHONY: config config: $(B)/.config.ok -# Order matters -ifeq ($(AESNI),y) -BUILD_TARGETS += build-ipsec-mb build-isal-crypto build-dpdk -else -BUILD_TARGETS += build-dpdk -endif - .PHONY: build-ipsec-mb build-ipsec-mb: mkdir -p $(I)/lib/ - make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y + # Do not build GCM stuff if we are building ISA_L + make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=$(ISA_L_CRYPTO_LIB) cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/ .PHONY: build-isal-crypto @@ -260,6 +270,15 @@ build-dpdk: @if [ ! -e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi @make $(DPDK_MAKE_ARGS) install +# Order matters +ifeq ($(AESNI),y) +BUILD_TARGETS += build-ipsec-mb +endif +ifeq ($(ISA_L_CRYPTO_LIB),y) +BUILD_TARGETS += build-isal-crypto +endif +BUILD_TARGETS += build-dpdk + $(B)/.build.ok: $(BUILD_TARGETS) @touch $@ diff --git a/src/configure.ac b/src/configure.ac index 4c2d3b47..6b6d9636 100644 --- a/src/configure.ac +++ b/src/configure.ac @@ -97,6 +97,26 @@ AC_DEFUN([DPDK_IS_PMD_ENABLED], m4_append_uniq([list_of_with], [$2], [, ]) ]) +AC_DEFUN([DETECT_DPDK_IS_1702_OR_1705], +[ + AC_MSG_CHECKING([for RTE_VERSION 17.02/17.05 in rte_version.h]) + AC_TRY_RUN( + [ + #include + int main() + { + return ((RTE_VER_YEAR != 17) || + (RTE_VER_MONTH != 2 && RTE_VER_MONTH != 5)); + } + ], + [dpdk_is_1702_or_1705=yes] + [AC_MSG_RESULT([yes])], + [dpdk_is_1702_or_1705=no] + [AC_MSG_RESULT([no])] + ) + AM_CONDITIONAL(DPDK_IS_1702_OR_1705, test "$dpdk_is_1702_or_1705" = "yes") +]) + ############################################################################### # configure arguments ############################################################################### @@ -185,34 +205,64 @@ AM_COND_IF([ENABLE_DPDK_SHARED], [AC_MSG_ERROR([DPDK shared library not found])],) ]) +with_aesni_mb_lib=no +with_isa_l_crypto_lib=no + DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_MB, dpdk_aesni_mb_pmd) +DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_GCM, dpdk_aesni_gcm_pmd) + +DETECT_DPDK_IS_1702_OR_1705() + AM_COND_IF([WITH_DPDK_AESNI_MB_PMD], [ - AC_CHECK_LIB([IPSec_MB], [submit_job_sse], [], + AC_CHECK_LIB([IPSec_MB], [submit_job_sse], + [with_aesni_mb_lib=yes], [AC_MSG_ERROR([IPSec_MB library not found])]) ]) -DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_GCM, dpdk_aesni_gcm_pmd) AM_COND_IF([WITH_DPDK_AESNI_GCM_PMD], [ - AC_CHECK_LIB([isal_crypto], [aesni_gcm128_init], [], - [AC_MSG_ERROR([isal_crypto library not found])]) + AM_COND_IF([DPDK_IS_1702_OR_1705], + [ + AC_CHECK_LIB([isal_crypto], [aesni_gcm128_init], + [with_isa_l_crypto_lib=yes], + 
[AC_MSG_ERROR([isal_crypto library not found])]) + ], + [ + AC_CHECK_LIB([IPSec_MB], [submit_job_sse], + [with_aesni_mb_lib=yes], + [AC_MSG_ERROR([IPSec_MB library not found])]) + ]) ]) -DPDK_IS_PMD_ENABLED(LIBRTE_MLX5_PMD, dpdk_mlx5_pmd) -AM_COND_IF([WITH_DPDK_MLX5_PMD], +m4_append([list_of_with], [aesni_mb_lib], [, ]) +AM_CONDITIONAL(WITH_AESNI_MB_LIB, test "$with_aesni_mb_lib" = "yes") + +m4_append([list_of_with], [isa_l_crypto_lib], [, ]) +AM_CONDITIONAL(WITH_ISA_L_CRYPTO_LIB, test "$with_isa_l_crypto_lib" = "yes") + + +with_ibverbs_lib=no +DPDK_IS_PMD_ENABLED(LIBRTE_MLX4_PMD, dpdk_mlx4_pmd) +AM_COND_IF([WITH_DPDK_MLX4_PMD], [ - AC_CHECK_LIB([ibverbs], [ibv_fork_init], [], + AC_CHECK_LIB([ibverbs], [ibv_fork_init], + [with_ibverbs_lib=yes], [AC_MSG_ERROR([ibverbs library not found])]) ]) -DPDK_IS_PMD_ENABLED(LIBRTE_MLX4_PMD, dpdk_mlx4_pmd) -AM_COND_IF([WITH_DPDK_MLX4_PMD], +DPDK_IS_PMD_ENABLED(LIBRTE_MLX5_PMD, dpdk_mlx5_pmd) +AM_COND_IF([WITH_DPDK_MLX5_PMD], [ - AC_CHECK_LIB([ibverbs], [ibv_fork_init], [], + AC_CHECK_LIB([ibverbs], [ibv_fork_init], + [with_ibverbs_lib=yes], [AC_MSG_ERROR([ibverbs library not found])]) ]) +m4_append([list_of_with], [ibverbs_lib], [, ]) +AM_CONDITIONAL(WITH_IBVERBS_LIB, test "$with_ibverbs_lib" = "yes") + + AM_COND_IF([ENABLE_G2], [ PKG_CHECK_MODULES(g2, gtk+-2.0) diff --git a/src/plugins/dpdk.am b/src/plugins/dpdk.am index 3a1ffeeb..15195a21 100644 --- a/src/plugins/dpdk.am +++ b/src/plugins/dpdk.am @@ -19,20 +19,24 @@ dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -ldpdk else dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive endif -if WITH_DPDK_AESNI_MB_PMD +if WITH_AESNI_MB_LIB dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a endif -if WITH_DPDK_AESNI_GCM_PMD +if WITH_ISA_L_CRYPTO_LIB dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libisal_crypto.a,-l:libisal_crypto.a endif -dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl -if WITH_DPDK_MLX5_PMD +if WITH_IBVERBS_LIB dpdk_plugin_la_LDFLAGS += -Wl,-libverbs endif -if WITH_DPDK_MLX4_PMD -dpdk_plugin_la_LDFLAGS += -Wl,-libverbs +if DPDK_IS_1702_OR_1705 +dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=1 -DDPDK_NO_AEAD=1 +else +dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=0 -DDPDK_NO_AEAD=0 +dpdk_plugin_la_LDFLAGS += -Wl,-lnuma endif +dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl + dpdk_plugin_la_SOURCES = \ dpdk/main.c \ dpdk/buffer.c \ diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c index df52c58f..2707b4d8 100644 --- a/src/plugins/dpdk/device/common.c +++ b/src/plugins/dpdk/device/common.c @@ -181,9 +181,9 @@ dpdk_device_stop (dpdk_device_t * xd) } } -void -dpdk_port_state_callback (uint8_t port_id, - enum rte_eth_event_type type, void *param) +always_inline int +dpdk_port_state_callback_inline (uint8_t port_id, + enum rte_eth_event_type type, void *param) { struct rte_eth_link link; vlib_main_t *vm = vlib_get_main (); @@ -193,7 +193,7 @@ dpdk_port_state_callback (uint8_t port_id, if (type != RTE_ETH_EVENT_INTR_LSC) { clib_warning ("Unknown event %d received for port %d", type, port_id); - return; + return -1; } rte_eth_link_get_nowait (port_id, &link); @@ -238,8 +238,28 @@ dpdk_port_state_callback (uint8_t port_id, else clib_warning ("Port %d Link Down\n\n", port_id); } + + return 0; +} + +#if DPDK_VOID_CALLBACK +void +dpdk_port_state_callback (uint8_t port_id, + enum rte_eth_event_type type, void *param) +{ + dpdk_port_state_callback_inline (port_id, type, param); } +#else +int +dpdk_port_state_callback 
(uint8_t port_id, + enum rte_eth_event_type type, + void *param, + void *ret_param __attribute__ ((unused))) +{ + return dpdk_port_state_callback_inline (port_id, type, param); +} +#endif /* * fd.io coding-style-patch-verification: ON * diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h index 29a2c760..1e34e3fb 100644 --- a/src/plugins/dpdk/device/dpdk.h +++ b/src/plugins/dpdk/device/dpdk.h @@ -418,8 +418,15 @@ typedef struct void dpdk_device_setup (dpdk_device_t * xd); void dpdk_device_start (dpdk_device_t * xd); void dpdk_device_stop (dpdk_device_t * xd); + +#if DPDK_VOID_CALLBACK void dpdk_port_state_callback (uint8_t port_id, enum rte_eth_event_type type, void *param); +#else +int dpdk_port_state_callback (uint8_t port_id, + enum rte_eth_event_type type, + void *param, void *ret_param); +#endif #define foreach_dpdk_error \ _(NONE, "no error") \ diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c index a9314065..a9cf2502 100644 --- a/src/plugins/dpdk/ipsec/cli.c +++ b/src/plugins/dpdk/ipsec/cli.c @@ -86,13 +86,28 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) hash_foreach (key, data, cwm->algo_qp_map, ({ cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; +#if DPDK_NO_AEAD cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER; cap.sym.cipher.algo = p_key->cipher_algo; +#else + if (p_key->is_aead) + { + cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD; + cap.sym.aead.algo = p_key->cipher_algo; + } + else + { + cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cap.sym.cipher.algo = p_key->cipher_algo; + } +#endif check_algo_is_supported (&cap, cipher_str); + cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH; cap.sym.auth.algo = p_key->auth_algo; check_algo_is_supported (&cap, auth_str); + vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n", vlib_mains[i]->thread_index, cipher_str, auth_str, p_key->is_outbound ? "out" : "in", diff --git a/src/plugins/dpdk/ipsec/esp.h b/src/plugins/dpdk/ipsec/esp.h index 56f0c756..308a66af 100644 --- a/src/plugins/dpdk/ipsec/esp.h +++ b/src/plugins/dpdk/ipsec/esp.h @@ -22,6 +22,9 @@ typedef struct { enum rte_crypto_cipher_algorithm algo; +#if ! DPDK_NO_AEAD + enum rte_crypto_aead_algorithm aead_algo; +#endif u8 key_len; u8 iv_len; } dpdk_esp_crypto_alg_t; @@ -65,7 +68,11 @@ dpdk_esp_init () c->iv_len = 16; c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_GCM_128]; +#if DPDK_NO_AEAD c->algo = RTE_CRYPTO_CIPHER_AES_GCM; +#else + c->aead_algo = RTE_CRYPTO_AEAD_AES_GCM; +#endif c->key_len = 16; c->iv_len = 8; @@ -90,42 +97,68 @@ dpdk_esp_init () i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256]; i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC; i->trunc_size = 32; - +#if DPDK_NO_AEAD i = &em->esp_integ_algs[IPSEC_INTEG_ALG_AES_GCM_128]; i->algo = RTE_CRYPTO_AUTH_AES_GCM; i->trunc_size = 16; +#endif } static_always_inline int translate_crypto_algo (ipsec_crypto_alg_t crypto_algo, - struct rte_crypto_sym_xform *cipher_xform) + struct rte_crypto_sym_xform *xform, u8 use_esn) { +#if ! DPDK_NO_AEAD + const u16 iv_off = + sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op) + + offsetof (dpdk_cop_priv_t, cb); +#endif + + xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + switch (crypto_algo) { case IPSEC_CRYPTO_ALG_NONE: - cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL; +#if ! 
DPDK_NO_AEAD + xform->cipher.iv.offset = iv_off; + xform->cipher.iv.length = 0; +#endif + xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL; break; case IPSEC_CRYPTO_ALG_AES_CBC_128: case IPSEC_CRYPTO_ALG_AES_CBC_192: case IPSEC_CRYPTO_ALG_AES_CBC_256: - cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; +#if ! DPDK_NO_AEAD + xform->cipher.iv.offset = iv_off; + xform->cipher.iv.length = 16; +#endif + xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; break; case IPSEC_CRYPTO_ALG_AES_GCM_128: - cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM; +#if DPDK_NO_AEAD + xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM; +#else + xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; + xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM; + xform->aead.iv.offset = iv_off; + xform->aead.iv.length = 12; /* GCM IV, not ESP IV */ + xform->aead.digest_length = 16; + xform->aead.aad_length = use_esn ? 12 : 8; +#endif break; default: return -1; } - cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - return 0; } static_always_inline int translate_integ_algo (ipsec_integ_alg_t integ_alg, - struct rte_crypto_sym_xform *auth_xform, int use_esn) + struct rte_crypto_sym_xform *auth_xform, u8 use_esn) { + auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; + switch (integ_alg) { case IPSEC_INTEG_ALG_NONE: @@ -152,21 +185,21 @@ translate_integ_algo (ipsec_integ_alg_t integ_alg, auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC; auth_xform->auth.digest_length = 32; break; +#if DPDK_NO_AEAD case IPSEC_INTEG_ALG_AES_GCM_128: auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM; auth_xform->auth.digest_length = 16; auth_xform->auth.add_auth_data_length = use_esn ? 12 : 8; break; +#endif default: return -1; } - auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; - return 0; } -static_always_inline int +static_always_inline i32 create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, u8 is_outbound) { @@ -178,6 +211,10 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, struct rte_crypto_sym_xform *xfs; uword key = 0, *data; crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; +#if ! DPDK_NO_AEAD + i32 socket_id = rte_socket_id (); + i32 ret; +#endif if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) { @@ -190,15 +227,7 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, sa->salt = random_u32 (&seed); } - cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; - cipher_xform.cipher.key.data = sa->crypto_key; - cipher_xform.cipher.key.length = sa->crypto_key_len; - - auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; - auth_xform.auth.key.data = sa->integ_key; - auth_xform.auth.key.length = sa->integ_key_len; - - if (translate_crypto_algo (sa->crypto_alg, &cipher_xform) < 0) + if (translate_crypto_algo (sa->crypto_alg, &cipher_xform, sa->use_esn) < 0) return -1; p_key->cipher_algo = cipher_xform.cipher.algo; @@ -206,19 +235,44 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, return -1; p_key->auth_algo = auth_xform.auth.algo; - if (is_outbound) +#if ! 
DPDK_NO_AEAD + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) { - cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; - auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; - cipher_xform.next = &auth_xform; + cipher_xform.aead.key.data = sa->crypto_key; + cipher_xform.aead.key.length = sa->crypto_key_len; + + if (is_outbound) + cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + else + cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_DECRYPT; + cipher_xform.next = NULL; xfs = &cipher_xform; + p_key->is_aead = 1; } - else + else /* Cipher + Auth */ +#endif { - cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; - auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; - auth_xform.next = &cipher_xform; - xfs = &auth_xform; + cipher_xform.cipher.key.data = sa->crypto_key; + cipher_xform.cipher.key.length = sa->crypto_key_len; + + auth_xform.auth.key.data = sa->integ_key; + auth_xform.auth.key.length = sa->integ_key_len; + + if (is_outbound) + { + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; + cipher_xform.next = &auth_xform; + xfs = &cipher_xform; + } + else + { + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; + auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; + auth_xform.next = &cipher_xform; + xfs = &auth_xform; + } + p_key->is_aead = 0; } p_key->is_outbound = is_outbound; @@ -227,17 +281,115 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, if (!data) return -1; +#if DPDK_NO_AEAD sa_sess->sess = rte_cryptodev_sym_session_create (cwm->qp_data[*data].dev_id, xfs); - if (!sa_sess->sess) return -1; +#else + sa_sess->sess = + rte_cryptodev_sym_session_create (dcm->sess_h_pools[socket_id]); + if (!sa_sess->sess) + return -1; + + ret = + rte_cryptodev_sym_session_init (cwm->qp_data[*data].dev_id, sa_sess->sess, + xfs, dcm->sess_pools[socket_id]); + if (ret) + return -1; +#endif sa_sess->qp_index = (u8) * data; return 0; } +static_always_inline void +crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi) +{ + icb->salt = salt; + icb->iv[0] = seq; + icb->iv[1] = seq_hi; +#if DPDK_NO_AEAD + icb->cnt = clib_host_to_net_u32 (1); +#endif +} + +#define __unused __attribute__((unused)) +static_always_inline void +crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0, + struct rte_crypto_op *cop, void *session, + u32 cipher_off, u32 cipher_len, + u8 * icb __unused, u32 iv_size __unused, + u32 auth_off, u32 auth_len, + u8 * aad __unused, u32 aad_size __unused, + u8 * digest, u64 digest_paddr, u32 digest_size __unused) +{ + struct rte_crypto_sym_op *sym_cop; + + sym_cop = (struct rte_crypto_sym_op *) (cop + 1); + + sym_cop->m_src = mb0; + rte_crypto_op_attach_sym_session (cop, session); + + if (!digest_paddr) + digest_paddr = + rte_pktmbuf_mtophys_offset (mb0, (uintptr_t) digest - (uintptr_t) mb0); + +#if DPDK_NO_AEAD + sym_cop->cipher.data.offset = cipher_off; + sym_cop->cipher.data.length = cipher_len; + + sym_cop->cipher.iv.data = icb; + sym_cop->cipher.iv.phys_addr = + cop->phys_addr + (uintptr_t) icb - (uintptr_t) cop; + sym_cop->cipher.iv.length = iv_size; + + if (is_aead) + { + sym_cop->auth.aad.data = aad; + sym_cop->auth.aad.phys_addr = + cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop; + sym_cop->auth.aad.length = aad_size; + } + else + { + sym_cop->auth.data.offset = auth_off; + sym_cop->auth.data.length = auth_len; + } + + sym_cop->auth.digest.data = digest; + sym_cop->auth.digest.phys_addr = digest_paddr; + sym_cop->auth.digest.length = digest_size; +#else /* ! 
DPDK_NO_AEAD */ + if (is_aead) + { + sym_cop->aead.data.offset = cipher_off; + sym_cop->aead.data.length = cipher_len; + + sym_cop->aead.aad.data = aad; + sym_cop->aead.aad.phys_addr = + cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop; + + sym_cop->aead.digest.data = digest; + sym_cop->aead.digest.phys_addr = digest_paddr; + } + else + { + sym_cop->cipher.data.offset = cipher_off; + sym_cop->cipher.data.length = cipher_len; + + sym_cop->auth.data.offset = auth_off; + sym_cop->auth.data.length = auth_len; + + sym_cop->auth.digest.data = digest; + sym_cop->auth.digest.phys_addr = digest_paddr; + } +#endif /* DPDK_NO_AEAD */ +} + +#undef __unused + #endif /* __DPDK_ESP_H__ */ /* diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c index 9377970a..c4f295d3 100644 --- a/src/plugins/dpdk/ipsec/esp_decrypt.c +++ b/src/plugins/dpdk/ipsec/esp_decrypt.c @@ -44,8 +44,7 @@ typedef enum { _(NOT_IP, "Not IP packet (dropped)") \ _(ENQ_FAIL, "Enqueue failed (buffer full)") \ _(NO_CRYPTODEV, "Cryptodev not configured") \ - _(BAD_LEN, "Invalid ciphertext length") \ - _(UNSUPPORTED, "Cipher/Auth not supported") + _(BAD_LEN, "Invalid ciphertext length") typedef enum { @@ -122,7 +121,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, while (n_left_from > 0 && n_left_to_next > 0) { - u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size; + u32 bi0, sa_index0 = ~0, seq, trunc_size, iv_size; vlib_buffer_t * b0; esp_header_t * esp0; ipsec_sa_t * sa0; @@ -169,18 +168,6 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, sa0->total_data_size += b0->current_length; - if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) || - PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE)) - { - clib_warning ("SPI %u : only cipher + auth supported", sa0->spi); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, - ESP_DECRYPT_ERROR_UNSUPPORTED, 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0); if (PREDICT_FALSE(!sa_sess->sess)) @@ -211,7 +198,10 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, rte_crypto_op_attach_sym_session(cop, sess); - icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; + if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + trunc_size = 16; + else + trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len; /* Convert vlib buffer to mbuf */ @@ -222,7 +212,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, /* Outer IP header has already been stripped */ u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) - - iv_size - icv_size; + iv_size - trunc_size; if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0)) { @@ -242,84 +232,64 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1); - sym_cop->m_src = mb0; - sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size; - sym_cop->cipher.data.length = payload_len; + u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128; + u32 cipher_off, cipher_len; + u32 auth_off = 0, auth_len = 0, aad_size = 0; + u8 *aad = NULL, *digest = NULL; + u64 digest_paddr = 0; u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t)); - dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1); + dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *)(sym_cop + 1); + dpdk_gcm_cnt_blk *icb = &priv->cb; + + cipher_off = sizeof (esp_header_t) + iv_size; + cipher_len = payload_len; - if (sa0->crypto_alg == 
IPSEC_CRYPTO_ALG_AES_GCM_128) + digest = + vlib_buffer_get_current (b0) + sizeof(esp_header_t) + + iv_size + payload_len; + + if (is_aead) { - dpdk_gcm_cnt_blk *icb = &priv->cb; - icb->salt = sa0->salt; - clib_memcpy(icb->iv, iv, 8); - icb->cnt = clib_host_to_net_u32(1); - sym_cop->cipher.iv.data = (u8 *)icb; - sym_cop->cipher.iv.phys_addr = cop->phys_addr + - (uintptr_t)icb - (uintptr_t)cop; - sym_cop->cipher.iv.length = 16; - - u8 *aad = priv->aad; - clib_memcpy(aad, iv - sizeof(esp_header_t), 8); - sym_cop->auth.aad.data = aad; - sym_cop->auth.aad.phys_addr = cop->phys_addr + - (uintptr_t)aad - (uintptr_t)cop; - if (sa0->use_esn) - { - *((u32*)&aad[8]) = sa0->seq_hi; - sym_cop->auth.aad.length = 12; - } - else - { - sym_cop->auth.aad.length = 8; - } + u32 *_iv = (u32 *) iv; - sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*, - rte_pktmbuf_pkt_len(mb0) - icv_size); - sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0, - rte_pktmbuf_pkt_len(mb0) - icv_size); - sym_cop->auth.digest.length = icv_size; + crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]); + iv_size = 16; + aad = priv->aad; + clib_memcpy(aad, esp0, 8); + aad_size = 8; + if (sa0->use_esn) + { + *((u32*)&aad[8]) = sa0->seq_hi; + aad_size = 12; + } } else { - sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*, - sizeof (esp_header_t)); - sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0, - sizeof (esp_header_t)); - sym_cop->cipher.iv.length = iv_size; + clib_memcpy(icb, iv, 16); + + auth_off = 0; + auth_len = sizeof(esp_header_t) + iv_size + payload_len; if (sa0->use_esn) { dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1); - u8* payload_end = rte_pktmbuf_mtod_offset( - mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len); - - clib_memcpy (priv->icv, payload_end, icv_size); - *((u32*) payload_end) = sa0->seq_hi; - sym_cop->auth.data.offset = 0; - sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size - + payload_len + sizeof(sa0->seq_hi); - sym_cop->auth.digest.data = priv->icv; - sym_cop->auth.digest.phys_addr = cop->phys_addr - + (uintptr_t) priv->icv - (uintptr_t) cop; - sym_cop->auth.digest.length = icv_size; - } - else - { - sym_cop->auth.data.offset = 0; - sym_cop->auth.data.length = sizeof(esp_header_t) + - iv_size + payload_len; - - sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*, - rte_pktmbuf_pkt_len(mb0) - icv_size); - sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0, - rte_pktmbuf_pkt_len(mb0) - icv_size); - sym_cop->auth.digest.length = icv_size; + + clib_memcpy (priv->icv, digest, trunc_size); + *((u32*) digest) = sa0->seq_hi; + auth_len += sizeof(sa0->seq_hi); + + digest = priv->icv; + digest_paddr = + cop->phys_addr + (uintptr_t) priv->icv - (uintptr_t) cop; } } + crypto_op_setup (is_aead, mb0, cop, sess, + cipher_off, cipher_len, (u8 *) icb, iv_size, + auth_off, auth_len, aad, aad_size, + digest, digest_paddr, trunc_size); trace: if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -339,6 +309,9 @@ trace: { u32 enq; + if (!n_cop_qp[i]) + continue; + qpd = vec_elt_at_index(cwm->qp_data, i); enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id, qpd->cops, n_cop_qp[i]); @@ -433,7 +406,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, while (n_left_from > 0 && n_left_to_next > 0) { esp_footer_t * f0; - u32 bi0, next0, icv_size, iv_size; + u32 bi0, next0, trunc_size, iv_size; vlib_buffer_t * b0 = 0; ip4_header_t *ih4 = 0, *oh4 = 0; ip6_header_t *ih6 = 0, *oh6 = 0; @@ -455,7 +428,10 @@ dpdk_esp_decrypt_post_node_fn 
(vlib_main_t * vm, to_next[0] = bi0; to_next += 1; - icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; + if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + trunc_size = 16; + else + trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len; if (sa0->use_anti_replay) @@ -472,7 +448,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t)); vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size); - b0->current_length -= (icv_size + 2); + b0->current_length -= (trunc_size + 2); b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) + b0->current_length); diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c index ac552f6c..6de444fd 100644 --- a/src/plugins/dpdk/ipsec/esp_encrypt.c +++ b/src/plugins/dpdk/ipsec/esp_encrypt.c @@ -43,8 +43,7 @@ typedef enum _(RX_PKTS, "ESP pkts received") \ _(SEQ_CYCLED, "sequence number cycled") \ _(ENQ_FAIL, "Enqueue failed (buffer full)") \ - _(NO_CRYPTODEV, "Cryptodev not configured") \ - _(UNSUPPORTED, "Cipher/Auth not supported") + _(NO_CRYPTODEV, "Cryptodev not configured") typedef enum @@ -142,6 +141,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, const int BLOCK_SIZE = 16; u32 iv_size; u16 orig_sz; + u8 trunc_size; crypto_sa_session_t *sa_sess; void *sess; struct rte_crypto_op *cop = 0; @@ -199,6 +199,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, ssize_t adv; iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len; + if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + trunc_size = 16; + else + trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size; + ih0 = vlib_buffer_get_current (b0); orig_sz = b0->current_length; is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60; @@ -314,9 +319,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, transport_mode = 1; } - ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG); - ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE); - int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE; /* pad packet in input buffer */ @@ -330,8 +332,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes; f0->pad_length = pad_bytes; f0->next_header = next_hdr_type; - b0->current_length += pad_bytes + 2 + - em->esp_integ_algs[sa0->integ_alg].trunc_size; + b0->current_length += pad_bytes + 2 + trunc_size; vnet_buffer (b0)->sw_if_index[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX]; @@ -349,88 +350,64 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, mb0->pkt_len = b0->current_length; mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; - rte_crypto_op_attach_sym_session (cop, sess); + dpdk_gcm_cnt_blk *icb = &priv->cb; - sym_cop->m_src = mb0; + crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi); - dpdk_gcm_cnt_blk *icb = &priv->cb; - icb->salt = sa0->salt; - icb->iv[0] = sa0->seq; - icb->iv[1] = sa0->seq_hi; - icb->cnt = clib_host_to_net_u32 (1); + u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128; + u32 cipher_off, cipher_len; + u32 auth_off = 0, auth_len = 0, aad_size = 0; + u8 *aad = NULL, *digest = NULL; - if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + if (is_aead) { u32 *esp_iv = (u32 *) (b0->data + b0->current_data + ip_hdr_size + sizeof (esp_header_t)); esp_iv[0] = sa0->seq; esp_iv[1] = sa0->seq_hi; - sym_cop->cipher.data.offset = - ip_hdr_size + sizeof (esp_header_t) + iv_size; - sym_cop->cipher.data.length = BLOCK_SIZE * blocks; - 
sym_cop->cipher.iv.length = 16; - } - else - { - sym_cop->cipher.data.offset = - ip_hdr_size + sizeof (esp_header_t); - sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size; - sym_cop->cipher.iv.length = iv_size; - } - sym_cop->cipher.iv.data = (u8 *) icb; - sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb - - (uintptr_t) cop; + cipher_off = ip_hdr_size + sizeof (esp_header_t) + iv_size; + cipher_len = BLOCK_SIZE * blocks; + iv_size = 16; /* GCM IV size, not ESP IV size */ - - ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG); - ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE); - - if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)) - { - u8 *aad = priv->aad; + aad = priv->aad; clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size, 8); - sym_cop->auth.aad.data = aad; - sym_cop->auth.aad.phys_addr = cop->phys_addr + - (uintptr_t) aad - (uintptr_t) cop; - + aad_size = 8; if (PREDICT_FALSE (sa0->use_esn)) { *((u32 *) & aad[8]) = sa0->seq_hi; - sym_cop->auth.aad.length = 12; - } - else - { - sym_cop->auth.aad.length = 8; + aad_size = 12; } + + digest = + vlib_buffer_get_current (b0) + b0->current_length - + trunc_size; } else { - sym_cop->auth.data.offset = ip_hdr_size; - sym_cop->auth.data.length = b0->current_length - ip_hdr_size - - em->esp_integ_algs[sa0->integ_alg].trunc_size; + cipher_off = ip_hdr_size + sizeof (esp_header_t); + cipher_len = BLOCK_SIZE * blocks + iv_size; + + auth_off = ip_hdr_size; + auth_len = b0->current_length - ip_hdr_size - trunc_size; + + digest = + vlib_buffer_get_current (b0) + b0->current_length - + trunc_size; if (PREDICT_FALSE (sa0->use_esn)) { - u8 *payload_end = - vlib_buffer_get_current (b0) + b0->current_length; - *((u32 *) payload_end) = sa0->seq_hi; - sym_cop->auth.data.length += sizeof (sa0->seq_hi); + *((u32 *) digest) = sa0->seq_hi; + auth_len += sizeof (sa0->seq_hi); } } - sym_cop->auth.digest.data = vlib_buffer_get_current (b0) + - b0->current_length - - em->esp_integ_algs[sa0->integ_alg].trunc_size; - sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0, - b0->current_length - - - em->esp_integ_algs - [sa0->integ_alg].trunc_size); - sym_cop->auth.digest.length = - em->esp_integ_algs[sa0->integ_alg].trunc_size; + crypto_op_setup (is_aead, mb0, cop, sess, + cipher_off, cipher_len, (u8 *) icb, iv_size, + auth_off, auth_len, aad, aad_size, + digest, 0, trunc_size); if (PREDICT_FALSE (is_ipv6)) { @@ -470,6 +447,9 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, { u32 enq; + if (!n_cop_qp[i]) + continue; + qpd = vec_elt_at_index(cwm->qp_data, i); enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id, qpd->cops, n_cop_qp[i]); diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c index 7066564d..c922940c 100644 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ b/src/plugins/dpdk/ipsec/ipsec.c @@ -56,18 +56,23 @@ add_del_sa_sess (u32 sa_index, u8 is_add) else { u8 dev_id; + i32 ret; sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index); dev_id = cwm->qp_data[sa_sess->qp_index].dev_id; if (!sa_sess->sess) continue; - - if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess)) - { - clib_warning("failed to free session"); - return -1; - } +#if DPDK_NO_AEAD + ret = (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess) == NULL); + ASSERT (ret); +#else + ret = rte_cryptodev_sym_session_clear(dev_id, sa_sess->sess); + ASSERT (!ret); + + ret = rte_cryptodev_sym_session_free(sa_sess->sess); + ASSERT (!ret); +#endif memset(sa_sess, 0, sizeof(sa_sess[0])); } } @@ -94,7 +99,7 @@ 
update_qp_data (crypto_worker_main_t * cwm, } /* *INDENT-ON* */ - vec_add2 (cwm->qp_data, qpd, 1); + vec_add2_aligned (cwm->qp_data, qpd, 1, CLIB_CACHE_LINE_BYTES); qpd->dev_id = cdev_id; qpd->qp_id = qp_id; @@ -119,6 +124,9 @@ add_mapping (crypto_worker_main_t * cwm, p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo; p_key->auth_algo = (u8) auth_cap->sym.auth.algo; p_key->is_outbound = is_outbound; +#if ! DPDK_NO_AEAD + p_key->is_aead = cipher_cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD; +#endif ret = hash_get (cwm->algo_qp_map, key); if (ret) @@ -147,6 +155,20 @@ add_cdev_mapping (crypto_worker_main_t * cwm, for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { +#if ! DPDK_NO_AEAD + if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) + { + struct rte_cryptodev_capabilities none = { 0 }; + + if (check_algo_is_supported (i, NULL) != 0) + continue; + + none.sym.auth.algo = RTE_CRYPTO_AUTH_NULL; + + mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, &none); + continue; + } +#endif if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) continue; @@ -205,17 +227,23 @@ dpdk_ipsec_check_support (ipsec_sa_t * sa) { if (sa->integ_alg != IPSEC_INTEG_ALG_NONE) return clib_error_return (0, "unsupported integ-alg %U with " - "crypto-algo aes-gcm-128", + "crypto-alg aes-gcm-128", format_ipsec_integ_alg, sa->integ_alg); +#if DPDK_NO_AEAD sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128; +#endif } - else - { - if (sa->integ_alg == IPSEC_INTEG_ALG_NONE || - sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) - return clib_error_return (0, "unsupported integ-alg %U", - format_ipsec_integ_alg, sa->integ_alg); - } +#if DPDK_NO_AEAD + else if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE || + sa->integ_alg == IPSEC_INTEG_ALG_NONE || + sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) +#else + else if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) +#endif + return clib_error_return (0, + "unsupported integ-alg %U with crypto-alg %U", + format_ipsec_integ_alg, sa->integ_alg, + format_ipsec_crypto_alg, sa->crypto_alg); return 0; } @@ -233,6 +261,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, struct rte_mempool *rmp; i32 dev_id, ret; u32 i, skip_master; +#if ! DPDK_NO_AEAD + u32 max_sess_size = 0, sess_size; + i8 socket_id; +#endif if (check_cryptodev_queues () < 0) { @@ -297,9 +329,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, dev_conf.socket_id = rte_cryptodev_socket_id (dev_id); dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs; +#if DPDK_NO_AEAD dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS; dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE; - +#endif ret = rte_cryptodev_configure (dev_id, &dev_conf); if (ret < 0) { @@ -310,16 +343,26 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) { +#if DPDK_NO_AEAD ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, dev_conf.socket_id); +#else + ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, + dev_conf.socket_id, NULL); +#endif if (ret < 0) { clib_warning ("cryptodev %u qp %u setup error", dev_id, qp); goto error; } } - vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id, - CLIB_CACHE_LINE_BYTES); + vec_validate (dcm->cop_pools, dev_conf.socket_id); + +#if ! 
DPDK_NO_AEAD + sess_size = rte_cryptodev_get_private_session_size (dev_id); + if (sess_size > max_sess_size) + max_sess_size = sess_size; +#endif if (!vec_elt (dcm->cop_pools, dev_conf.socket_id)) { @@ -333,14 +376,14 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, DPDK_CRYPTO_CACHE_SIZE, DPDK_CRYPTO_PRIV_SIZE, dev_conf.socket_id); - vec_free (pool_name); if (!rmp) { - clib_warning ("failed to allocate mempool on socket %u", - dev_conf.socket_id); + clib_warning ("failed to allocate %s", pool_name); + vec_free (pool_name); goto error; } + vec_free (pool_name); vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp; } @@ -348,6 +391,51 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE); } +#if ! DPDK_NO_AEAD + /* *INDENT-OFF* */ + vec_foreach_index (socket_id, dcm->cop_pools) + { + u8 *pool_name; + + if (!vec_elt (dcm->cop_pools, socket_id)) + continue; + + vec_validate (dcm->sess_h_pools, socket_id); + pool_name = format (0, "crypto_sess_h_socket%u%c", + socket_id, 0); + rmp = + rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + rte_cryptodev_get_header_session_size (), + 512, 0, NULL, NULL, NULL, NULL, + socket_id, 0); + if (!rmp) + { + clib_warning ("failed to allocate %s", pool_name); + vec_free (pool_name); + goto error; + } + vec_free (pool_name); + vec_elt (dcm->sess_h_pools, socket_id) = rmp; + + vec_validate (dcm->sess_pools, socket_id); + pool_name = format (0, "crypto_sess_socket%u%c", + socket_id, 0); + rmp = + rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + max_sess_size, 512, 0, NULL, NULL, NULL, NULL, + socket_id, 0); + if (!rmp) + { + clib_warning ("failed to allocate %s", pool_name); + vec_free (pool_name); + goto error; + } + vec_free (pool_name); + vec_elt (dcm->sess_pools, socket_id) = rmp; + } + /* *INDENT-ON* */ +#endif + dpdk_esp_init (); /* Add new next node and set as default */ diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h index d7940345..a94dd682 100644 --- a/src/plugins/dpdk/ipsec/ipsec.h +++ b/src/plugins/dpdk/ipsec/ipsec.h @@ -53,6 +53,7 @@ typedef struct u8 cipher_algo; u8 auth_algo; u8 is_outbound; + u8 is_aead; } crypto_worker_qp_key_t; typedef struct @@ -81,6 +82,8 @@ typedef struct typedef struct { + struct rte_mempool **sess_h_pools; + struct rte_mempool **sess_pools; struct rte_mempool **cop_pools; crypto_worker_main_t *workers_main; u8 enabled; @@ -146,12 +149,14 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, { struct { - uint8_t cipher_algo; enum rte_crypto_sym_xform_type type; union { enum rte_crypto_auth_algorithm auth; enum rte_crypto_cipher_algorithm cipher; +#if ! 
DPDK_NO_AEAD + enum rte_crypto_aead_algorithm aead; +#endif }; char *name; } supported_algo[] = @@ -162,15 +167,18 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, { .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"}, +#if DPDK_NO_AEAD { .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = - RTE_CRYPTO_CIPHER_AES_CTR,.name = "AES_CTR"}, + RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"}, +#else { - .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = - RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"}, + .type = RTE_CRYPTO_SYM_XFORM_AEAD,.aead = + RTE_CRYPTO_AEAD_AES_GCM,.name = "AES-GCM"}, +#endif { - .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher = - RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"}, + .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = + RTE_CRYPTO_AUTH_NULL,.name = "NULL"}, { .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"}, @@ -183,15 +191,16 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, { .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"}, - { - .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = - RTE_CRYPTO_AUTH_AES_XCBC_MAC,.name = "AES-XCBC-MAC"}, +#if DPDK_NO_AEAD { .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth = RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"}, +#endif { /* tail */ - .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},}; + .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED} + }; + uint32_t i = 0; if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) @@ -203,6 +212,10 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap, { if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER && cap->sym.cipher.algo == supported_algo[i].cipher) || +#if ! DPDK_NO_AEAD + (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD && + cap->sym.aead.algo == supported_algo[i].aead) || +#endif (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH && cap->sym.auth.algo == supported_algo[i].auth)) { -- cgit 1.2.3-korg From 206b2d4342b1c1b7715c7d442f582da02e5ec9b9 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Fri, 25 Aug 2017 19:10:57 +0200 Subject: dpdk: bump to dpdk 17.08, remove support for dpdk 17.02 Change-Id: I674fb1212e48693939045523df085326a4dd1809 Signed-off-by: Damjan Marion --- dpdk/Makefile | 5 ++--- src/plugins/dpdk/buffer.c | 17 ----------------- src/plugins/dpdk/device/device.c | 4 ---- src/plugins/dpdk/device/init.c | 4 ---- src/plugins/dpdk/hqos/hqos.c | 8 -------- src/plugins/dpdk/ipsec/esp.h | 6 ++++-- 6 files changed, 6 insertions(+), 38 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 8d5b42ef..06e38f76 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -24,12 +24,11 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 17.05 -PKG_SUFFIX ?= vpp6 +DPDK_VERSION ?= 17.08 +PKG_SUFFIX ?= vpp1 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) -DPDK_17.02_TARBALL_MD5_CKSUM := 6b9f7387c35641f4e8dbba3e528f2376 DPDK_17.05_TARBALL_MD5_CKSUM := 0a68c31cd6a6cabeed0a4331073e4c05 DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index b0f247e1..28af100a 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -213,18 +213,6 @@ fill_free_list (vlib_main_t * vm, mb2 = vm->mbuf_alloc_list[i + 2]; mb3 = vm->mbuf_alloc_list[i + 3]; -#if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 
0) - ASSERT (rte_mbuf_refcnt_read (mb0) == 0); - ASSERT (rte_mbuf_refcnt_read (mb1) == 0); - ASSERT (rte_mbuf_refcnt_read (mb2) == 0); - ASSERT (rte_mbuf_refcnt_read (mb3) == 0); - - rte_mbuf_refcnt_set (mb0, 1); - rte_mbuf_refcnt_set (mb1, 1); - rte_mbuf_refcnt_set (mb2, 1); - rte_mbuf_refcnt_set (mb3, 1); -#endif - b0 = vlib_buffer_from_rte_mbuf (mb0); b1 = vlib_buffer_from_rte_mbuf (mb1); b2 = vlib_buffer_from_rte_mbuf (mb2); @@ -259,11 +247,6 @@ fill_free_list (vlib_main_t * vm, { mb0 = vm->mbuf_alloc_list[i]; -#if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 0) - ASSERT (rte_mbuf_refcnt_read (mb0) == 0); - rte_mbuf_refcnt_set (mb0, 1); -#endif - b0 = vlib_buffer_from_rte_mbuf (mb0); bi0 = vlib_get_buffer_index (vm, b0); diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c index c755060d..a247c7c9 100644 --- a/src/plugins/dpdk/device/device.c +++ b/src/plugins/dpdk/device/device.c @@ -254,11 +254,7 @@ static_always_inline &tx_vector[tx_tail], tx_head - tx_tail); rv = rte_ring_sp_enqueue_burst (hqos->swq, (void **) &tx_vector[tx_tail], -#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) (uint16_t) (tx_head - tx_tail), 0); -#else - (uint16_t) (tx_head - tx_tail)); -#endif } else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD)) { diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 6f7e168b..a795ba0e 100755 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -1186,11 +1186,7 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) /* Set up DPDK eal and packet mbuf pool early. */ -#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) rte_log_set_global_level (log_level); -#else - rte_set_log_level (log_level); -#endif vm = vlib_get_main (); diff --git a/src/plugins/dpdk/hqos/hqos.c b/src/plugins/dpdk/hqos/hqos.c index 2f2504d6..813eb91c 100644 --- a/src/plugins/dpdk/hqos/hqos.c +++ b/src/plugins/dpdk/hqos/hqos.c @@ -430,11 +430,7 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) pkts_enq_len += rte_ring_sc_dequeue_burst (swq, (void **) &pkts_enq[pkts_enq_len], -#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) hqos->hqos_burst_enq, 0); -#else - hqos->hqos_burst_enq); -#endif /* Get next SWQ for this device */ swq_pos++; @@ -525,11 +521,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) pkts_enq_len += rte_ring_sc_dequeue_burst (swq, (void **) &pkts_enq[pkts_enq_len], -#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0) hqos->hqos_burst_enq, 0); -#else - hqos->hqos_burst_enq); -#endif /* Get next SWQ for this device */ swq_pos++; diff --git a/src/plugins/dpdk/ipsec/esp.h b/src/plugins/dpdk/ipsec/esp.h index 308a66af..5b5c81ae 100644 --- a/src/plugins/dpdk/ipsec/esp.h +++ b/src/plugins/dpdk/ipsec/esp.h @@ -242,9 +242,11 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, cipher_xform.aead.key.length = sa->crypto_key_len; if (is_outbound) - cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + cipher_xform.cipher.op = + (enum rte_crypto_cipher_operation) RTE_CRYPTO_AEAD_OP_ENCRYPT; else - cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_DECRYPT; + cipher_xform.cipher.op = + (enum rte_crypto_cipher_operation) RTE_CRYPTO_AEAD_OP_DECRYPT; cipher_xform.next = NULL; xfs = &cipher_xform; p_key->is_aead = 1; -- cgit 1.2.3-korg From 9746552e98166cd07c5d77b26d805c5e602b0335 Mon Sep 17 00:00:00 2001 From: Brian Brooks Date: Mon, 28 Aug 2017 13:05:26 -0500 Subject: Native arm64 build: dpdk/Makefile change With this change, the status of `make build': Huawei D02, Linux 4.4.0, gcc 5.4.1 - success AMD Seattle, 
Linux 4.4.6, gcc 5.3.1 - compiler ICEs Cavium ThunderX, Linux 4.4.49, gcc 5.4.0 - success Before: Huawei D02, Linux 4.4.0, gcc 5.4.1 - fail AMD Seattle, Linux 4.4.6, gcc 5.3.1 - fail Cavium ThunderX, Linux 4.4.49, gcc 5.4.0 - success Change-Id: I49db34a33f9ca0725c7511d4f796706892b5b2da Signed-off-by: Brian Brooks --- dpdk/Makefile | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 06e38f76..4d8dea28 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -75,14 +75,23 @@ ifeq ($(MACHINE),$(filter $(MACHINE),x86_64 i686)) DPDK_TARGET ?= $(MACHINE)-native-linuxapp-$(DPDK_CC) DPDK_MACHINE ?= nhm DPDK_TUNE ?= core-avx2 +else ifeq ($(MACHINE),aarch64) +export CROSS="" +ifneq (,$(findstring thunder,$(shell [[ -f /sys/bus/pci/devices/0000:00:01.0/uevent ]] && cat /sys/bus/pci/devices/0000:00:01.0/uevent | grep cavium))) ############################################################################## # Cavium ThunderX ############################################################################## -else ifneq (,$(findstring thunder,$(shell cat /sys/bus/pci/devices/0000:00:01.0/uevent | grep cavium))) -export CROSS="" DPDK_TARGET ?= arm64-thunderx-linuxapp-$(DPDK_CC) DPDK_MACHINE ?= thunderx DPDK_TUNE ?= generic +else +############################################################################## +# Generic ARM64 +############################################################################## +DPDK_TARGET ?= arm64-armv8a-linuxapp-$(DPDK_CC) +DPDK_MACHINE ?= armv8a +DPDK_TUNE ?= generic +endif ############################################################################## # Unknown platofrm -- cgit 1.2.3-korg From a7191840beeb2c3a0f2598707ed1051a9f23c45f Mon Sep 17 00:00:00 2001 From: Brian Brooks Date: Wed, 6 Sep 2017 13:19:48 -0500 Subject: Improved arm64 chip detection Use ARMv8 Main ID Register (exposed thru /proc/cpuinfo) to identify the CPU implementor and part number. For further details, see the ARMv8 ARM D7.2.66. 
Change-Id: I2b0d0b165cda4ab9fc57c645af90e9e354b73f44 Signed-off-by: Brian Brooks Signed-off-by: Jeremy Linton Signed-off-by: Ola Liljedahl Reviewed-by: Song Zhu --- dpdk/Makefile | 57 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 11 deletions(-) (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 4d8dea28..afe50c4b 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -75,29 +75,64 @@ ifeq ($(MACHINE),$(filter $(MACHINE),x86_64 i686)) DPDK_TARGET ?= $(MACHINE)-native-linuxapp-$(DPDK_CC) DPDK_MACHINE ?= nhm DPDK_TUNE ?= core-avx2 + +############################################################################## +# ARM64 +############################################################################## else ifeq ($(MACHINE),aarch64) export CROSS="" -ifneq (,$(findstring thunder,$(shell [[ -f /sys/bus/pci/devices/0000:00:01.0/uevent ]] && cat /sys/bus/pci/devices/0000:00:01.0/uevent | grep cavium))) +DPDK_TARGET ?= arm64-armv8a-linuxapp-$(DPDK_CC) +DPDK_MACHINE ?= armv8a +DPDK_TUNE ?= generic + +CPU_IMP_ARM = 0x41 +CPU_IMP_CAVIUM = 0x43 + +CPU_PART_ARM_CORTEX_A53 = 0xd03 +CPU_PART_ARM_CORTEX_A57 = 0xd07 +CPU_PART_ARM_CORTEX_A72 = 0xd08 +CPU_PART_ARM_CORTEX_A73 = 0xd09 + +CPU_PART_CAVIUM_THUNDERX = 0x0a1 +CPU_PART_CAVIUM_THUNDERX_81XX = 0x0a2 +CPU_PART_CAVIUM_THUNDERX_83XX = 0x0a3 + +MIDR_IMPLEMENTER=$(shell awk '/implementer/ {print $$4;exit}' /proc/cpuinfo) +MIDR_PARTNUM=$(shell awk '/part/ {print $$4;exit}' /proc/cpuinfo) + +ifeq ($(MIDR_IMPLEMENTER),$(CPU_IMP_ARM)) ############################################################################## -# Cavium ThunderX +# Arm Cortex ############################################################################## -DPDK_TARGET ?= arm64-thunderx-linuxapp-$(DPDK_CC) -DPDK_MACHINE ?= thunderx -DPDK_TUNE ?= generic +CPU_PART_ARM_TUNE := $(CPU_PART_ARM_CORTEX_A53)/cortex-a53 \ + $(CPU_PART_ARM_CORTEX_A57)/cortex-a57 \ + $(CPU_PART_ARM_CORTEX_A72)/cortex-a72 \ + $(CPU_PART_ARM_CORTEX_A73)/cortex-a73 +CPU_TUNE = $(notdir $(filter $(MIDR_PARTNUM)/%,$(CPU_PART_ARM_TUNE))) +ifneq ($(CPU_TUNE),) +DPDK_TUNE = $(CPU_TUNE) else +$(warning Unknown Arm CPU) +endif + +else ifeq ($(MIDR_IMPLEMENTER),$(CPU_IMP_CAVIUM)) ############################################################################## -# Generic ARM64 +# Cavium ThunderX ############################################################################## -DPDK_TARGET ?= arm64-armv8a-linuxapp-$(DPDK_CC) -DPDK_MACHINE ?= armv8a -DPDK_TUNE ?= generic +ifneq (,$(findstring $(MIDR_PARTNUM),$(CPU_PART_CAVIUM_THUNDERX) \ + $(CPU_PART_CAVIUM_THUNDERX_81XX) $(CPU_PART_CAVIUM_THUNDERX_83XX))) +DPDK_TARGET = arm64-thunderx-linuxapp-$(DPDK_CC) +DPDK_MACHINE = thunderx +else +$(warning Unknown Cavium CPU) +endif endif ############################################################################## -# Unknown platofrm +# Unknown platform ############################################################################## else -$(error unknown platform) +$(error Unknown platform) endif # /proc/cpuinfo does not exist on platforms without a /proc and on some -- cgit 1.2.3-korg From 4b0ac82a739bf7f1233ebb61269e27aa4cf9ab36 Mon Sep 17 00:00:00 2001 From: Thomas F Herbert Date: Tue, 29 Aug 2017 16:07:51 -0400 Subject: Add option to build without multi-buffer crypto. 
JIRA VPP-498 This patch also allows RPMs to be built without multi- buffer crypto for some RPM based downstream distros that don't have sufficiently new nasm or don't have an USA export license for multi-buffer crypto. The default is to build WITH multi-buffer crypto for x86-64. This patch allows optional building without multi-buffer crypto. To build without multi-buffer crypto, set the AESNI environment variable to n. To build rpm packages without multi-buffer crypto, build the rpms with the option turned off. make build AESNI=n or.. make pkg-rpm --without aesni ---How to test this patch on a Centos build.--- Build as above and verify that nasm isn't executed during the build process. vpp may be installed and the dpdk plugin may be inspected to verify that the multi-buffer code isn't present. Change-Id: I8c5cfd4cdd9eb2b96772a687eaa54560806e001b Signed-off-by: Thomas F Herbert --- Makefile | 3 ++- dpdk/Makefile | 5 +++-- extras/rpm/vpp.spec | 12 ++++++++++-- 3 files changed, 15 insertions(+), 5 deletions(-) (limited to 'dpdk/Makefile') diff --git a/Makefile b/Makefile index 01da7d12..c08115d4 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ CCACHE_DIR?=$(BR)/.ccache GDB?=gdb PLATFORM?=vpp SAMPLE_PLUGIN?=no +export AESNI?=y ,:=, define disable_plugins @@ -94,7 +95,7 @@ endif RPM_DEPENDS += chrpath libffi-devel rpm-build ifeq ($(OS_ID),fedora) RPM_DEPENDS += nasm -else +else ifeq ($(findstring y,$(AESNI)),y) RPM_DEPENDS += https://kojipkgs.fedoraproject.org//packages/nasm/2.12.02/2.fc26/x86_64/nasm-2.12.02-2.fc26.x86_64.rpm endif diff --git a/dpdk/Makefile b/dpdk/Makefile index afe50c4b..4e0ad4f3 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -34,14 +34,13 @@ DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) MACHINE=$(shell uname -m) -AESNI ?= n ISA_L_CRYPTO_LIB := n IPSEC_MB_VER ?= 0.46 ISA_L_CRYPTO_VER := 2.18.0 ifeq ($(MACHINE),$(filter $(MACHINE),x86_64)) -AESNI = y + AESNI ?= y # DPDK pre 17.08 depends on ISA-L Crypto library for GCM PMD ifneq ($(firstword $(sort $(DPDK_VERSION), 17.08)), 17.08) ISA_L_CRYPTO_LIB = y @@ -49,6 +48,8 @@ AESNI = y $(info Building ISA-L Crypto $(ISA_L_CRYPTO_VER) library) endif $(info Building IPSec-MB $(IPSEC_MB_VER) library) +else + AESNI ?= N endif AESNIMB_LIB_TARBALL := v$(IPSEC_MB_VER).tar.gz diff --git a/extras/rpm/vpp.spec b/extras/rpm/vpp.spec index 439e6bda..6c14b039 100644 --- a/extras/rpm/vpp.spec +++ b/extras/rpm/vpp.spec @@ -1,3 +1,4 @@ +%bcond_without aesni %define _vpp_build_dir build-tool-native %define _unitdir /lib/systemd/system %define _topdir %(pwd) @@ -41,7 +42,9 @@ BuildRequires: python-devel, python-virtualenv %endif BuildRequires: glibc-static, java-1.8.0-openjdk, java-1.8.0-openjdk-devel yum-utils, redhat-lsb BuildRequires: apr-devel +%if %{with aesni} BuildRequires: nasm +%endif BuildRequires: numactl-devel BuildRequires: autoconf automake libtool byacc bison flex @@ -120,8 +123,13 @@ This package contains the python bindings for the vpp api groupadd -f -r vpp %build -make bootstrap -make -C build-root PLATFORM=vpp TAG=%{_vpp_tag} install-packages +%if %{with aesni} + make bootstrap + make -C build-root PLATFORM=vpp TAG=%{_vpp_tag} install-packages +%else + make bootstrap AESNI=n + make -C build-root PLATFORM=vpp AESNI=n TAG=%{_vpp_tag} install-packages +%endif cd %{_mu_build_dir}/../src/vpp-api/python && %py2_build %install -- cgit 1.2.3-korg From b5518bedd95bea9c30cebcd5b0db76bd00f30a80 Mon Sep 17 00:00:00 2001 From: Steve Shin Date: Mon, 9 Oct 2017 15:04:56 -0700 
Subject: dpdk: patch to support bonded interface for MLX NIC At present, creating bonding devices using --vdev is broken for PMD like mlx5 as it is neither UIO nor VFIO based and hence PMD driver is unknown to find_port_id_by_pci_addr(). This DPDK patch fixes parsing PCI ID from bonding device params by verifying it in RTE PCI bus, rather than checking dev->kdrv. Change-Id: If575f63ef31733102566610d769ddd212d74736a Signed-off-by: Steve Shin (cherry picked from commit 268e64e312257b0ab36e0d5b9124cc3f2a1841a7) --- dpdk/Makefile | 2 +- .../0001-net-bonding-support-for-mlx.patch | 63 ++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 dpdk/dpdk-17.08_patches/0001-net-bonding-support-for-mlx.patch (limited to 'dpdk/Makefile') diff --git a/dpdk/Makefile b/dpdk/Makefile index 4e0ad4f3..179e3565 100644 --- a/dpdk/Makefile +++ b/dpdk/Makefile @@ -25,7 +25,7 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) DPDK_VERSION ?= 17.08 -PKG_SUFFIX ?= vpp1 +PKG_SUFFIX ?= vpp2 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) diff --git a/dpdk/dpdk-17.08_patches/0001-net-bonding-support-for-mlx.patch b/dpdk/dpdk-17.08_patches/0001-net-bonding-support-for-mlx.patch new file mode 100644 index 00000000..13a2ba67 --- /dev/null +++ b/dpdk/dpdk-17.08_patches/0001-net-bonding-support-for-mlx.patch @@ -0,0 +1,63 @@ +diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c +index bb634c6..7c65dda 100644 +--- a/drivers/net/bonding/rte_eth_bond_args.c ++++ b/drivers/net/bonding/rte_eth_bond_args.c +@@ -61,16 +61,6 @@ + unsigned i; + + for (i = 0; i < rte_eth_dev_count(); i++) { +- +- /* Currently populated by rte_eth_copy_pci_info(). +- * +- * TODO: Once the PCI bus has arrived we should have a better +- * way to test for being a PCI device or not. +- */ +- if (rte_eth_devices[i].data->kdrv == RTE_KDRV_UNKNOWN || +- rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE) +- continue; +- + pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]); + eth_pci_addr = &pci_dev->addr; + +@@ -98,6 +88,16 @@ + return -1; + } + ++static inline int ++pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr) ++{ ++ struct rte_pci_device *pdev; ++ const struct rte_pci_addr *paddr = _pci_addr; ++ ++ pdev = RTE_DEV_TO_PCI(*(struct rte_device **)(void *)&dev); ++ return rte_eal_compare_pci_addr(&pdev->addr, paddr); ++} ++ + /** + * Parses a port identifier string to a port id by pci address, then by name, + * and finally port id. +@@ -106,10 +106,23 @@ + parse_port_id(const char *port_str) + { + struct rte_pci_addr dev_addr; ++ struct rte_bus *pci_bus; ++ struct rte_device *dev; + int port_id; + ++ pci_bus = rte_bus_find_by_name("pci"); ++ if (pci_bus == NULL) { ++ RTE_LOG(ERR, PMD, "unable to find PCI bus\n"); ++ return -1; ++ } ++ + /* try parsing as pci address, physical devices */ +- if (eal_parse_pci_DomBDF(port_str, &dev_addr) == 0) { ++ if (pci_bus->parse(port_str, &dev_addr) == 0) { ++ dev = pci_bus->find_device(NULL, pci_addr_cmp, &dev_addr); ++ if (dev == NULL) { ++ RTE_LOG(ERR, PMD, "unable to find PCI device\n"); ++ return -1; ++ } + port_id = find_port_id_by_pci_addr(&dev_addr); + if (port_id < 0) + return -1; -- cgit 1.2.3-korg
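
Editorial note (not part of the patch series above): the last patch makes --vdev bonded devices usable with PMDs such as mlx4/mlx5 that are neither UIO nor VFIO bound, because slave ports are now resolved through the PCI bus (rte_bus_find_by_name("pci"), bus->parse(), bus->find_device()) instead of by inspecting dev->kdrv. As a rough illustration only, a bond over two Mellanox ports might be declared in VPP's startup.conf dpdk stanza roughly as follows; the PCI addresses, bonding mode, and xmit policy below are placeholder values chosen for the sketch, not values taken from these patches:

    dpdk {
      # Hypothetical example: two ports enslaved to one DPDK bonding vdev.
      # slave= takes the PCI address string that parse_port_id() now resolves
      # via the PCI bus; mode and xmit_policy are standard bonding PMD args.
      vdev eth_bond0,mode=2,slave=0000:83:00.0,slave=0000:83:00.1,xmit_policy=l34
    }

VPP passes the vdev line through to the DPDK EAL as --vdev, so the equivalent EAL argument for a standalone DPDK application would be --vdev 'eth_bond0,mode=2,slave=0000:83:00.0,slave=0000:83:00.1,xmit_policy=l34' (again, addresses shown only as an example).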