author    Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 11:59:50 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 12:00:19 +0000
commit    8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82
tree      208e3bc33c220854d89d010e3abf720a2e62e546
parent    b63264c8342e6a1b6971c79550d2af2024b6a4de

New upstream version 18.11-rc1 (tag: upstream/18.11-rc1)

Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mvneta')
 -rw-r--r--  drivers/net/mvneta/Makefile                    |   42
 -rw-r--r--  drivers/net/mvneta/meson.build                 |   28
 -rw-r--r--  drivers/net/mvneta/mvneta_ethdev.c             |  987
 -rw-r--r--  drivers/net/mvneta/mvneta_ethdev.h             |   79
 -rw-r--r--  drivers/net/mvneta/mvneta_rxtx.c               | 1030
 -rw-r--r--  drivers/net/mvneta/mvneta_rxtx.h               |   38
 -rw-r--r--  drivers/net/mvneta/rte_pmd_mvneta_version.map  |    3
 7 files changed, 2207 insertions(+), 0 deletions(-)
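
The new PMD registers a vdev driver taking an iface=<ifc> argument (see RTE_PMD_REGISTER_PARAM_STRING at the end of mvneta_ethdev.c below). A minimal sketch of how an application could instantiate it through EAL arguments; the interface name eth0 is a placeholder for a kernel netdev backed by the mvneta driver, and MUSDK is assumed to be set up on the target:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

int main(int argc, char **argv)
{
	/* Hypothetical invocation; "iface" names an existing netdev. */
	char *args[] = { argv[0], "--vdev=net_mvneta,iface=eth0" };

	(void)argc;
	if (rte_eal_init(2, args) < 0)
		return -1;

	/* rte_pmd_mvneta_probe() ran during EAL init; the port is now
	 * visible through the usual ethdev iterators. */
	printf("%u port(s) available\n", rte_eth_dev_count_avail());
	return 0;
}
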
diff --git a/drivers/net/mvneta/Makefile b/drivers/net/mvneta/Makefile
new file mode 100644
index 00000000..05a0487c
--- /dev/null
+++ b/drivers/net/mvneta/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvneta.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvneta_version.map
+
+# external library dependencies
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_SIZE=64
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev -lrte_common_mvep
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta_ethdev.c mvneta_rxtx.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvneta/meson.build b/drivers/net/mvneta/meson.build
new file mode 100644
index 00000000..c0b1bce0
--- /dev/null
+++ b/drivers/net/mvneta/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += [
+ '-DMVCONF_TYPES_PUBLIC',
+ '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC',
+ '-DMVCONF_DMA_PHYS_ADDR_T_SIZE=64'
+ ]
+endif
+
+sources = files(
+ 'mvneta_ethdev.c',
+ 'mvneta_rxtx.c'
+)
+
+deps += ['cfgfile', 'common_mvep']
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
new file mode 100644
index 00000000..2d766645
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -0,0 +1,987 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_kvargs.h>
+#include <rte_bus_vdev.h>
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <rte_mvep_common.h>
+
+#include "mvneta_rxtx.h"
+
+
+#define MVNETA_IFACE_NAME_ARG "iface"
+
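+/** Port Rx offloads capabilities */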
+#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offloads capabilities */
+#define MVNETA_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define MVNETA_PKT_SIZE_MAX (16382 - MV_MH_SIZE)
+#define MVNETA_DEFAULT_MTU 1500
+
+#define MVNETA_MAC_ADDRS_MAX 256 /* 16 UC, 256 IP, 256 MC/BC */
+/** Maximum length of a match string */
+#define MVNETA_MATCH_LEN 16
+
+int mvneta_logtype;
+
+static const char * const valid_args[] = {
+ MVNETA_IFACE_NAME_ARG,
+ NULL
+};
+
+struct mvneta_ifnames {
+ const char *names[NETA_NUM_ETH_PPIO];
+ int idx;
+};
+
+static int mvneta_dev_num;
+
+/**
+ * Deinitialize packet processor.
+ */
+static void
+mvneta_neta_deinit(void)
+{
+ neta_deinit();
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_neta_init(void)
+{
+ return neta_init();
+}
+
+/**
+ * Callback used by rte_kvargs_process() during argument parsing.
+ *
+ * @param key
+ * Pointer to the parsed key (unused).
+ * @param value
+ * Pointer to the parsed value.
+ * @param extra_args
+ * Pointer to the extra arguments, which contain the address of the
+ * table of pointers to the parsed interface names.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mvneta_ifnames_get(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ struct mvneta_ifnames *ifnames = extra_args;
+
+ ifnames->names[ifnames->idx++] = value;
+
+ return 0;
+}
+
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for the requested number of Tx and Rx queues.
+ * RSS is not supported, so multi-queue Rx modes are rejected.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_configure(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct neta_ppio_params *ppio_params;
+
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+ MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
+ dev->data->dev_conf.rxmode.mq_mode);
+ if (dev->data->nb_rx_queues > 1)
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.split_hdr_size) {
+ MVNETA_LOG(INFO, "Split headers not supported");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ MRVL_NETA_ETH_HDRS_LEN;
+
+ if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ priv->multiseg = 1;
+
+ ppio_params = &priv->ppio_params;
+ ppio_params->outqs_params.num_outqs = dev->data->nb_tx_queues;
+ /* Default: 1 TC, no QoS supported. */
+ ppio_params->inqs_params.num_tcs = 1;
+ ppio_params->inqs_params.tcs_params[0].pkt_offset = MRVL_NETA_PKT_OFFS;
+ priv->ppio_id = dev->data->port_id;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ * @param info
+ * Info structure output buffer.
+ */
+static void
+mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *info)
+{
+ info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G;
+
+ info->max_rx_queues = MRVL_NETA_RXQ_MAX;
+ info->max_tx_queues = MRVL_NETA_TXQ_MAX;
+ info->max_mac_addrs = MVNETA_MAC_ADDRS_MAX;
+
+ info->rx_desc_lim.nb_max = MRVL_NETA_RXD_MAX;
+ info->rx_desc_lim.nb_min = MRVL_NETA_RXD_MIN;
+ info->rx_desc_lim.nb_align = MRVL_NETA_RXD_ALIGN;
+
+ info->tx_desc_lim.nb_max = MRVL_NETA_TXD_MAX;
+ info->tx_desc_lim.nb_min = MRVL_NETA_TXD_MIN;
+ info->tx_desc_lim.nb_align = MRVL_NETA_TXD_ALIGN;
+
+ info->rx_offload_capa = MVNETA_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MVNETA_RX_OFFLOADS;
+
+ info->tx_offload_capa = MVNETA_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MVNETA_TX_OFFLOADS;
+
+ /* By default packets are dropped if no descriptors are available */
+ info->default_rxconf.rx_drop_en = 1;
+ /* Deferred tx queue start is not supported */
+ info->default_txconf.tx_deferred_start = 0;
+ info->default_txconf.offloads = 0;
+
+ info->max_rx_pktlen = MVNETA_PKT_SIZE_MAX;
+}
+
+/**
+ * Return supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ *
+ * @return
+ * Const pointer to the table with supported packet types.
+ */
+static const uint32_t *
+mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP
+ };
+
+ return ptypes;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * Setting the MTU affects hardware MRU (packets larger than the MRU
+ * will be dropped).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * New MTU.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ uint16_t mbuf_data_size = 0; /* SW buffer size */
+ uint16_t mru;
+ int ret;
+
+ mru = MRVL_NETA_MTU_TO_MRU(mtu);
+ /*
+ * min_rx_buf_size is equal to mbuf data size
+ * if pmd didn't set it differently
+ */
+ mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+ /* Prevent PMD from:
+ * - setting mru greater than the mbuf size resulting in
+ * hw and sw buffer size mismatch
+ * - setting mtu that requires the support of scattered packets
+ * when this feature has not been enabled/supported so far.
+ */
+ if (!dev->data->scattered_rx &&
+ (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {
+ mru = mbuf_data_size - MRVL_NETA_PKT_OFFS;
+ mtu = MRVL_NETA_MRU_TO_MTU(mru);
+ MVNETA_LOG(WARNING, "MTU too big, max MTU possible limited by"
+ " current mbuf size: %u. Set MTU to %u, MRU to %u",
+ mbuf_data_size, mtu, mru);
+ }
+
+ if (mtu < ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {
+ MVNETA_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
+ return -EINVAL;
+ }
+
+ dev->data->mtu = mtu;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
+
+ if (!priv->ppio)
+ /* It is OK. The new MTU will be set later, in mvneta_dev_start() */
+ return 0;
+
+ ret = neta_ppio_set_mru(priv->ppio, mru);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to change MRU");
+ return ret;
+ }
+
+ ret = neta_ppio_set_mtu(priv->ppio, mtu);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to change MTU");
+ return ret;
+ }
+ MVNETA_LOG(INFO, "MTU changed to %u, MRU = %u", mtu, mru);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link up.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return 0;
+
+ return neta_ppio_enable(priv->ppio);
+}
+
+/**
+ * DPDK callback to bring the link down.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return 0;
+
+ return neta_ppio_disable(priv->ppio);
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mvneta_dev_start(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char match[MVNETA_MATCH_LEN];
+ int ret = 0, i;
+
+ if (priv->ppio)
+ return mvneta_dev_set_link_up(dev);
+
+ snprintf(match, sizeof(match), "%s", dev->data->name);
+ priv->ppio_params.match = match;
+ priv->ppio_params.inqs_params.mtu = dev->data->mtu;
+
+ ret = neta_ppio_init(&priv->ppio_params, &priv->ppio);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to init ppio");
+ return ret;
+ }
+ priv->ppio_id = priv->ppio->port_id;
+
+ /*
+ * In case there are some stale uc/mc mac addresses flush them
+ * here. It cannot be done during mvneta_dev_close() as port information
+ * is already gone at that point (due to neta_ppio_deinit() in
+ * mvneta_dev_stop()).
+ */
+ if (!priv->uc_mc_flushed) {
+ ret = neta_ppio_flush_mac_addrs(priv->ppio, 0, 1);
+ if (ret) {
+ MVNETA_LOG(ERR,
+ "Failed to flush uc/mc filter list");
+ goto out;
+ }
+ priv->uc_mc_flushed = 1;
+ }
+
+ ret = mvneta_alloc_rx_bufs(dev);
+ if (ret)
+ goto out;
+
+ ret = mvneta_mtu_set(dev, dev->data->mtu);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to set MTU %d", dev->data->mtu);
+ goto out;
+ }
+
+ ret = mvneta_dev_set_link_up(dev);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to set link up");
+ goto out;
+ }
+
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ mvneta_set_tx_function(dev);
+
+ return 0;
+
+out:
+ MVNETA_LOG(ERR, "Failed to start device");
+ neta_ppio_deinit(priv->ppio);
+ return ret;
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_dev_stop(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return;
+
+ mvneta_dev_set_link_down(dev);
+ mvneta_flush_queues(dev);
+ neta_ppio_deinit(priv->ppio);
+
+ priv->ppio = NULL;
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_dev_close(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int i;
+
+ if (priv->ppio)
+ mvneta_dev_stop(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ mvneta_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ mvneta_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ /*
+ * TODO
+ * once MUSDK provides necessary API use it here
+ */
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata;
+ struct ifreq req;
+ int ret, fd, link_up;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ edata.cmd = ETHTOOL_GSET;
+
+ strcpy(req.ifr_name, dev->data->name);
+ req.ifr_data = (void *)&edata;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ return -EFAULT;
+ ret = ioctl(fd, SIOCETHTOOL, &req);
+ if (ret == -1) {
+ close(fd);
+ return -EFAULT;
+ }
+
+ close(fd);
+
+ switch (ethtool_cmd_speed(&edata)) {
+ case SPEED_10:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case SPEED_100:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case SPEED_1000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case SPEED_2500:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ default:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ }
+
+ dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+ ETH_LINK_FIXED;
+
+ neta_ppio_get_link_state(priv->ppio, &link_up);
+ dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv->ppio)
+ return;
+
+ neta_ppio_get_promisc(priv->ppio, &en);
+ if (en) {
+ MVNETA_LOG(INFO, "Promiscuous already enabled");
+ return;
+ }
+
+ ret = neta_ppio_set_promisc(priv->ppio, 1);
+ if (ret)
+ MVNETA_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv->ppio)
+ return;
+
+ neta_ppio_get_promisc(priv->ppio, &en);
+ if (!en) {
+ MVNETA_LOG(INFO, "Promiscuous already disabled");
+ return;
+ }
+
+ ret = neta_ppio_set_promisc(priv->ppio, 0);
+ if (ret)
+ MVNETA_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mvneta_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = neta_ppio_remove_mac_addr(priv->ppio,
+ dev->data->mac_addrs[index].addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf),
+ &dev->data->mac_addrs[index]);
+ MVNETA_LOG(ERR, "Failed to remove mac %s", buf);
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (index == 0)
+ /* For setting index 0, mvneta_mac_addr_set() should be used. */
+ return -1;
+
+ if (!priv->ppio)
+ return 0;
+
+ ret = neta_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MVNETA_LOG(ERR, "Failed to add mac %s", buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EINVAL;
+
+ ret = neta_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MVNETA_LOG(ERR, "Failed to set mac to %s", buf);
+ }
+ return ret;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct neta_ppio_statistics ppio_stats;
+ int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ ret = neta_ppio_get_statistics(priv->ppio, &ppio_stats);
+ if (unlikely(ret)) {
+ MVNETA_LOG(ERR, "Failed to update port statistics");
+ return ret;
+ }
+
+ stats->ipackets += ppio_stats.rx_packets +
+ ppio_stats.rx_broadcast_packets +
+ ppio_stats.rx_multicast_packets -
+ priv->prev_stats.ipackets;
+ stats->opackets += ppio_stats.tx_packets +
+ ppio_stats.tx_broadcast_packets +
+ ppio_stats.tx_multicast_packets -
+ priv->prev_stats.opackets;
+ stats->ibytes += ppio_stats.rx_bytes - priv->prev_stats.ibytes;
+ stats->obytes += ppio_stats.tx_bytes - priv->prev_stats.obytes;
+ stats->imissed += ppio_stats.rx_discard +
+ ppio_stats.rx_overrun -
+ priv->prev_stats.imissed;
+
+ stats->ierrors = ppio_stats.rx_packets_err +
+ ppio_stats.rx_errors +
+ ppio_stats.rx_crc_error -
+ priv->prev_stats.ierrors;
+ stats->oerrors = ppio_stats.tx_errors - priv->prev_stats.oerrors;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_stats_reset(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = mvneta_stats_get(dev, &priv->prev_stats);
+ if (unlikely(ret))
+ MVNETA_LOG(ERR, "Failed to reset port statistics");
+}
+
+
+static const struct eth_dev_ops mvneta_ops = {
+ .dev_configure = mvneta_dev_configure,
+ .dev_start = mvneta_dev_start,
+ .dev_stop = mvneta_dev_stop,
+ .dev_set_link_up = mvneta_dev_set_link_up,
+ .dev_set_link_down = mvneta_dev_set_link_down,
+ .dev_close = mvneta_dev_close,
+ .link_update = mvneta_link_update,
+ .promiscuous_enable = mvneta_promiscuous_enable,
+ .promiscuous_disable = mvneta_promiscuous_disable,
+ .mac_addr_remove = mvneta_mac_addr_remove,
+ .mac_addr_add = mvneta_mac_addr_add,
+ .mac_addr_set = mvneta_mac_addr_set,
+ .mtu_set = mvneta_mtu_set,
+ .stats_get = mvneta_stats_get,
+ .stats_reset = mvneta_stats_reset,
+ .dev_infos_get = mvneta_dev_infos_get,
+ .dev_supported_ptypes_get = mvneta_dev_supported_ptypes_get,
+ .rxq_info_get = mvneta_rxq_info_get,
+ .txq_info_get = mvneta_txq_info_get,
+ .rx_queue_setup = mvneta_rx_queue_setup,
+ .rx_queue_release = mvneta_rx_queue_release,
+ .tx_queue_setup = mvneta_tx_queue_setup,
+ .tx_queue_release = mvneta_tx_queue_release,
+};
+
+/**
+ * Create device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port's name.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
+{
+ int ret, fd;
+ struct rte_eth_dev *eth_dev;
+ struct mvneta_priv *priv;
+ struct ifreq req;
+
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ priv = rte_zmalloc_socket(name, sizeof(*priv), 0, rte_socket_id());
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private = priv;
+
+ eth_dev->data->mac_addrs =
+ rte_zmalloc("mac_addrs",
+ ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);
+ if (!eth_dev->data->mac_addrs) {
+ MVNETA_LOG(ERR, "Failed to allocate space for eth addrs");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memset(&req, 0, sizeof(req));
+ strcpy(req.ifr_name, name);
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ ret = ioctl(fd, SIOCGIFHWADDR, &req);
+ close(fd);
+ if (ret)
+ goto out_free;
+
+ memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->device = &vdev->device;
+ eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
+ mvneta_set_tx_function(eth_dev);
+ eth_dev->dev_ops = &mvneta_ops;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+out_free:
+ rte_eth_dev_release_port(eth_dev);
+
+ return ret;
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param eth_dev
+ * Pointer to the corresponding rte_eth_dev structure.
+ */
+static void
+mvneta_eth_dev_destroy(struct rte_eth_dev *eth_dev)
+{
+ rte_eth_dev_release_port(eth_dev);
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port name.
+ */
+static void
+mvneta_eth_dev_destroy_name(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return;
+
+ mvneta_eth_dev_destroy(eth_dev);
+}
+
+/**
+ * DPDK callback to register the virtual device.
+ *
+ * @param vdev
+ * Pointer to the virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mvneta_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_kvargs *kvlist;
+ struct mvneta_ifnames ifnames;
+ int ret = -EINVAL;
+ uint32_t i, ifnum;
+ const char *params;
+
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ kvlist = rte_kvargs_parse(params, valid_args);
+ if (!kvlist)
+ return -EINVAL;
+
+ ifnum = rte_kvargs_count(kvlist, MVNETA_IFACE_NAME_ARG);
+ if (ifnum > RTE_DIM(ifnames.names))
+ goto out_free_kvlist;
+
+ ifnames.idx = 0;
+ rte_kvargs_process(kvlist, MVNETA_IFACE_NAME_ARG,
+ mvneta_ifnames_get, &ifnames);
+
+ /*
+ * The below system initialization should be done only once,
+ * on the first provided configuration file
+ */
+ if (mvneta_dev_num)
+ goto init_devices;
+
+ MVNETA_LOG(INFO, "Perform MUSDK initializations");
+
+ ret = rte_mvep_init(MVEP_MOD_T_NETA, kvlist);
+ if (ret)
+ goto out_free_kvlist;
+
+ ret = mvneta_neta_init();
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to init NETA!");
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ goto out_free_kvlist;
+ }
+
+init_devices:
+ for (i = 0; i < ifnum; i++) {
+ MVNETA_LOG(INFO, "Creating %s", ifnames.names[i]);
+ ret = mvneta_eth_dev_create(vdev, ifnames.names[i]);
+ if (ret)
+ goto out_cleanup;
+ }
+ mvneta_dev_num += ifnum;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+out_cleanup:
+ for (; i > 0; i--)
+ mvneta_eth_dev_destroy_name(ifnames.names[i - 1]);
+
+ if (mvneta_dev_num == 0) {
+ mvneta_neta_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ }
+out_free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to remove virtual device.
+ *
+ * @param vdev
+ * Pointer to the removed virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mvneta_remove(struct rte_vdev_device *vdev)
+{
+ int i;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (!name)
+ return -EINVAL;
+
+ MVNETA_LOG(INFO, "Removing %s", name);
+
+ RTE_ETH_FOREACH_DEV(i) {
+ if (rte_eth_devices[i].device != &vdev->device)
+ continue;
+
+ mvneta_eth_dev_destroy(&rte_eth_devices[i]);
+ mvneta_dev_num--;
+ }
+
+ if (mvneta_dev_num == 0) {
+ MVNETA_LOG(INFO, "Perform MUSDK deinit");
+ mvneta_neta_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ }
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_mvneta_drv = {
+ .probe = rte_pmd_mvneta_probe,
+ .remove = rte_pmd_mvneta_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_mvneta, pmd_mvneta_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_mvneta, "iface=<ifc>");
+
+RTE_INIT(mvneta_init_log)
+{
+ mvneta_logtype = rte_log_register("pmd.net.mvneta");
+ if (mvneta_logtype >= 0)
+ rte_log_set_level(mvneta_logtype, RTE_LOG_NOTICE);
+}
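
The mvneta_ops table wires these callbacks into the generic ethdev API, so the standard configure/queue-setup/start sequence exercises mvneta_dev_configure(), mvneta_rx_queue_setup(), mvneta_tx_queue_setup() and mvneta_dev_start() in turn. A minimal sketch, assuming port_id refers to the mvneta vdev and mp is an existing mbuf pool:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int mvneta_port_init(uint16_t port_id, struct rte_mempool *mp)
{
	/* Zeroed conf gives ETH_MQ_RX_NONE; mvneta_dev_configure()
	 * rejects RSS/multi-queue Rx modes. */
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* 128 descriptors satisfies MRVL_NETA_RXD_MIN/MAX and the
	 * 32-descriptor alignment advertised by dev_infos_get(). */
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
				     NULL, mp);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	/* Initializes the MUSDK ppio and allocates the rx buffers */
	return rte_eth_dev_start(port_id);
}
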
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
new file mode 100644
index 00000000..101b0a81
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MVNETA_ETHDEV_H_
+#define _MVNETA_ETHDEV_H_
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+/*
+ * container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include <drivers/mv_neta.h>
+#include <drivers/mv_neta_ppio.h>
+
+/** Packet offset inside RX buffer. */
+#define MRVL_NETA_PKT_OFFS 64
+
+/** Maximum number of rx/tx queues per port */
+#define MRVL_NETA_RXQ_MAX 8
+#define MRVL_NETA_TXQ_MAX 8
+
+/** Minimum/maximum number of descriptors in tx queue */
+#define MRVL_NETA_TXD_MIN 16
+#define MRVL_NETA_TXD_MAX 2048
+
+/** Tx queue descriptors alignment in B */
+#define MRVL_NETA_TXD_ALIGN 32
+
+/** Minimum/maximum number of descriptors in rx queue */
+#define MRVL_NETA_RXD_MIN 16
+#define MRVL_NETA_RXD_MAX 2048
+
+/** Rx queue descriptors alignment in B */
+#define MRVL_NETA_RXD_ALIGN 32
+
+#define MRVL_NETA_VLAN_TAG_LEN 4
+#define MRVL_NETA_ETH_HDRS_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+ MRVL_NETA_VLAN_TAG_LEN)
+
+#define MRVL_NETA_HDRS_LEN (MV_MH_SIZE + MRVL_NETA_ETH_HDRS_LEN)
+#define MRVL_NETA_MTU_TO_MRU(mtu) ((mtu) + MRVL_NETA_HDRS_LEN)
+#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
+
+
+struct mvneta_priv {
+ /* Hot fields, used in fast path. */
+ struct neta_ppio *ppio; /**< Port handler pointer */
+
+ uint8_t pp_id;
+ uint8_t ppio_id; /* ppio port id */
+ uint8_t uc_mc_flushed;
+ uint8_t multiseg;
+
+ struct neta_ppio_params ppio_params;
+
+ uint64_t rate_max;
+ struct rte_eth_stats prev_stats;
+};
+
+/** Current log type. */
+extern int mvneta_logtype;
+
+#define MVNETA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, mvneta_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#endif /* _MVNETA_ETHDEV_H_ */
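
The MTU/MRU conversion macros above add the Marvell header plus Ethernet overhead on top of the MTU. A worked sketch of the arithmetic, assuming MUSDK defines MV_MH_SIZE as 2 bytes (that value comes from the MUSDK headers, not from this patch):

#include <assert.h>
#include "mvneta_ethdev.h"

static void mtu_mru_example(void)
{
	/* MRVL_NETA_ETH_HDRS_LEN = 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
	 *                          + 4 (VLAN tag) = 22
	 * MRVL_NETA_HDRS_LEN = 2 (MV_MH_SIZE, assumed) + 22 = 24
	 */
	assert(MRVL_NETA_MTU_TO_MRU(1500) == 1524);
	assert(MRVL_NETA_MRU_TO_MTU(1524) == 1500);
}
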
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
new file mode 100644
index 00000000..62caa684
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include "mvneta_rxtx.h"
+
+#define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)
+
+#define MRVL_NETA_DEFAULT_TC 0
+
+/** Maximum number of descriptors in shadow queue. Must be power of 2 */
+#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX
+
+/** Shadow queue size mask (since shadow queue size is power of 2) */
+#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)
+
+/** Minimum number of sent buffers to release from shadow queue to BM */
+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN 16
+
+/** Maximum number of sent buffers to release from shadow queue to BM */
+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX 64
+
+#define MVNETA_COOKIE_ADDR_INVALID ~0ULL
+#define MVNETA_COOKIE_HIGH_ADDR_SHIFT (sizeof(neta_cookie_t) * 8)
+#define MVNETA_COOKIE_HIGH_ADDR_MASK (~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)
+
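+/*
+ * MUSDK cookies are narrower than a pointer, so a descriptor stores only
+ * the low bits of an mbuf virtual address. The high bits are latched
+ * once, from the first buffer seen, and every subsequent mbuf must share
+ * them; MVNETA_CHECK_COOKIE_HIGH_ADDR() guards that invariant.
+ */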
+#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) { \
+ if (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID)) \
+ cookie_addr_high = \
+ (uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\
+}
+
+#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr) \
+ ((likely(cookie_addr_high == \
+ ((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 1 : 0)
+
+struct mvneta_rxq {
+ struct mvneta_priv *priv;
+ struct rte_mempool *mp;
+ int queue_id;
+ int port_id;
+ int size;
+ int cksum_enabled;
+ uint64_t bytes_recv;
+ uint64_t drop_mac;
+ uint64_t pkts_processed;
+};
+
+/*
+ * A shadow queue keeps track of buffers handed to the hardware so that,
+ * once transmitted, they can be harvested and released back to the
+ * mempool.
+ */
+struct mvneta_shadow_txq {
+ int head; /* write index - used when sending buffers */
+ int tail; /* read index - used when releasing buffers */
+ u16 size; /* queue occupied size */
+ struct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */
+};
+
+struct mvneta_txq {
+ struct mvneta_priv *priv;
+ int queue_id;
+ int port_id;
+ uint64_t bytes_sent;
+ struct mvneta_shadow_txq shadow_txq;
+ int tx_deferred_start;
+};
+
+static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;
+static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;
+
+static inline int
+mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)
+{
+ struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
+ struct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
+ int i, ret;
+ uint16_t nb_desc = *num;
+
+ ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc);
+ *num = 0;
+ return -1;
+ }
+
+ MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);
+
+ for (i = 0; i < nb_desc; i++) {
+ if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {
+ MVNETA_LOG(ERR,
+ "mbuf virt high addr 0x%lx out of range 0x%lx",
+ (uint64_t)mbufs[i] >> 32,
+ cookie_addr_high >> 32);
+ *num = 0;
+ goto out;
+ }
+ entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);
+ entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];
+ }
+ neta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);
+
+out:
+ for (i = *num; i < nb_desc; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ return 0;
+}
+
+/**
+ * Allocate buffers from mempool
+ * and store addresses in rx descriptors.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)
+{
+ uint16_t nb_desc, nb_desc_burst, sent = 0;
+ int ret = 0;
+
+ nb_desc = *num;
+
+ do {
+ nb_desc_burst =
+ (nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?
+ nb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;
+
+ ret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);
+ if (unlikely(ret || !nb_desc_burst))
+ break;
+
+ sent += nb_desc_burst;
+ nb_desc -= nb_desc_burst;
+
+ } while (nb_desc);
+
+ *num = sent;
+
+ return ret;
+}
+
+static inline void
+mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)
+{
+ sq->ent[sq->head].cookie = (uint64_t)buf;
+ sq->ent[sq->head].addr = buf ?
+ rte_mbuf_data_iova_default(buf) : 0;
+
+ sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ sq->size++;
+}
+
+static inline void
+mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)
+{
+ neta_ppio_outq_desc_reset(desc);
+ neta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
+ neta_ppio_outq_desc_set_pkt_offset(desc, 0);
+ neta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
+}
+
+/**
+ * Release already sent buffers to mempool.
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ */
+static inline void
+mvneta_sent_buffers_free(struct neta_ppio *ppio,
+ struct mvneta_shadow_txq *sq, int qid)
+{
+ struct neta_buff_inf *entry;
+ uint16_t nb_done = 0;
+ int i;
+ int tail = sq->tail;
+
+ neta_ppio_get_num_outq_done(ppio, qid, &nb_done);
+
+ if (nb_done > sq->size) {
+ MVNETA_LOG(ERR, "nb_done: %d, sq->size %d",
+ nb_done, sq->size);
+ return;
+ }
+
+ for (i = 0; i < nb_done; i++) {
+ entry = &sq->ent[tail];
+
+ if (unlikely(!entry->addr)) {
+ MVNETA_LOG(DEBUG,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
+ tail, (u64)entry->cookie,
+ (u64)entry->addr);
+ tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ continue;
+ }
+
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | entry->cookie);
+ rte_pktmbuf_free(mbuf);
+ tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ }
+
+ sq->tail = tail;
+ sq->size -= nb_done;
+}
+
+/**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ * @param l3_offset
+ * l3 packet offset.
+ * @param l4_offset
+ * l4 packet offset.
+ *
+ * @return
+ * Packet type information.
+ */
+static inline uint64_t
+mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,
+ uint8_t *l3_offset, uint8_t *l4_offset)
+{
+ enum neta_inq_l3_type l3_type;
+ enum neta_inq_l4_type l4_type;
+ uint64_t packet_type;
+
+ neta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+ neta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+ packet_type = RTE_PTYPE_L2_ETHER;
+
+ if (NETA_RXD_GET_VLAN_INFO(desc))
+ packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ switch (l3_type) {
+ case NETA_INQ_L3_TYPE_IPV4_BAD:
+ case NETA_INQ_L3_TYPE_IPV4_OK:
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ break;
+ case NETA_INQ_L3_TYPE_IPV6:
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ break;
+ default:
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ MVNETA_LOG(DEBUG, "Failed to recognize l3 packet type");
+ break;
+ }
+
+ switch (l4_type) {
+ case NETA_INQ_L4_TYPE_TCP:
+ packet_type |= RTE_PTYPE_L4_TCP;
+ break;
+ case NETA_INQ_L4_TYPE_UDP:
+ packet_type |= RTE_PTYPE_L4_UDP;
+ break;
+ default:
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ MVNETA_LOG(DEBUG, "Failed to recognize l4 packet type");
+ break;
+ }
+
+ return packet_type;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ * Offload flags.
+ * @param packet_type
+ * Packet type bitfield.
+ * @param l3_type
+ * Pointer to the neta_outq_l3_type structure.
+ * @param l4_type
+ * Pointer to the neta_outq_l4_type structure.
+ * @param gen_l3_cksum
+ * Will be set to 1 in case l3 checksum is computed.
+ * @param gen_l4_cksum
+ * Will be set to 1 in case l4 checksum is computed.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mvneta_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+ enum neta_outq_l3_type *l3_type,
+ enum neta_outq_l4_type *l4_type,
+ int *gen_l3_cksum,
+ int *gen_l4_cksum)
+{
+ /*
+ * Based on ol_flags prepare the information
+ * for neta_ppio_outq_desc_set_proto_info(), which sets up the
+ * descriptor for checksum offloading.
+ */
+ if (ol_flags & PKT_TX_IPV4) {
+ *l3_type = NETA_OUTQ_L3_TYPE_IPV4;
+ *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *l3_type = NETA_OUTQ_L3_TYPE_IPV6;
+ /* no checksum for ipv6 header */
+ *gen_l3_cksum = 0;
+ } else {
+ /* anything else: stop processing */
+ return -1;
+ }
+
+ ol_flags &= PKT_TX_L4_MASK;
+ if ((packet_type & RTE_PTYPE_L4_TCP) &&
+ ol_flags == PKT_TX_TCP_CKSUM) {
+ *l4_type = NETA_OUTQ_L4_TYPE_TCP;
+ *gen_l4_cksum = 1;
+ } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+ ol_flags == PKT_TX_UDP_CKSUM) {
+ *l4_type = NETA_OUTQ_L4_TYPE_UDP;
+ *gen_l4_cksum = 1;
+ } else {
+ *l4_type = NETA_OUTQ_L4_TYPE_OTHER;
+ /* no checksum for other type */
+ *gen_l4_cksum = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Get offload information from the received packet descriptor.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ *
+ * @return
+ * Mbuf offload flags.
+ */
+static inline uint64_t
+mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)
+{
+ uint64_t flags;
+ enum neta_inq_desc_status status;
+
+ status = neta_ppio_inq_desc_get_l3_pkt_error(desc);
+ if (unlikely(status != NETA_DESC_ERR_OK))
+ flags = PKT_RX_IP_CKSUM_BAD;
+ else
+ flags = PKT_RX_IP_CKSUM_GOOD;
+
+ status = neta_ppio_inq_desc_get_l4_pkt_error(desc);
+ if (unlikely(status != NETA_DESC_ERR_OK))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ return flags;
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_txq *q = txq;
+ struct mvneta_shadow_txq *sq;
+ struct neta_ppio_desc descs[nb_pkts];
+
+ int i, ret, bytes_sent = 0;
+ uint16_t num, sq_free_size;
+ uint64_t addr;
+
+ sq = &q->shadow_txq;
+ if (unlikely(!nb_pkts || !q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(q->priv->ppio,
+ sq, q->queue_id);
+
+ sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
+ if (unlikely(nb_pkts > sq_free_size)) {
+ MVNETA_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
+ nb_pkts, sq_free_size);
+ nb_pkts = sq_free_size;
+ }
+
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ int gen_l3_cksum, gen_l4_cksum;
+ enum neta_outq_l3_type l3_type;
+ enum neta_outq_l4_type l4_type;
+
+ /* Fill first mbuf info in shadow queue */
+ mvneta_fill_shadowq(sq, mbuf);
+ mvneta_fill_desc(&descs[i], mbuf);
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+
+ ret = mvneta_prepare_proto_info(mbuf->ol_flags,
+ mbuf->packet_type,
+ &l3_type, &l4_type,
+ &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ neta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+ num = nb_pkts;
+ neta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);
+
+
+ /* number of packets that were not sent */
+ if (unlikely(num > nb_pkts)) {
+ for (i = nb_pkts; i < num; i++) {
+ sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_NETA_TX_SHADOWQ_MASK;
+ addr = cookie_addr_high | sq->ent[sq->head].cookie;
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
+ }
+ sq->size -= num - nb_pkts;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
+/**
+ * DPDK callback for S/G transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_txq *q = txq;
+ struct mvneta_shadow_txq *sq;
+ struct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];
+ struct neta_ppio_sg_pkts pkts;
+ uint8_t frags[nb_pkts];
+ int i, j, ret, bytes_sent = 0;
+ int tail, tail_first;
+ uint16_t num, sq_free_size;
+ uint16_t nb_segs, total_descs = 0;
+ uint64_t addr;
+
+ sq = &q->shadow_txq;
+ pkts.frags = frags;
+ pkts.num = 0;
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(q->priv->ppio,
+ sq, q->queue_id);
+ /* Save shadow queue free size */
+ sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
+
+ tail = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ struct rte_mbuf *seg = NULL;
+ int gen_l3_cksum, gen_l4_cksum;
+ enum neta_outq_l3_type l3_type;
+ enum neta_outq_l4_type l4_type;
+
+ nb_segs = mbuf->nb_segs;
+ total_descs += nb_segs;
+
+ /*
+ * Check if total_descs does not exceed
+ * shadow queue free size
+ */
+ if (unlikely(total_descs > sq_free_size)) {
+ total_descs -= nb_segs;
+ MVNETA_LOG(DEBUG,
+ "No room in shadow queue for %d packets! "
+ "%d packets will be sent.",
+ nb_pkts, i);
+ break;
+ }
+
+
+ /* Check if nb_segs does not exceed the max nb of desc per
+ * fragmented packet
+ */
+ if (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {
+ total_descs -= nb_segs;
+ MVNETA_LOG(ERR,
+ "Too many segments. Packet won't be sent.");
+ break;
+ }
+
+ pkts.frags[pkts.num] = nb_segs;
+ pkts.num++;
+ tail_first = tail;
+
+ seg = mbuf;
+ for (j = 0; j < nb_segs - 1; j++) {
+ /* For the subsequent segments, set shadow queue
+ * buffer to NULL
+ */
+ mvneta_fill_shadowq(sq, NULL);
+ mvneta_fill_desc(&descs[tail], seg);
+
+ tail++;
+ seg = seg->next;
+ }
+ /* Put first mbuf info in last shadow queue entry */
+ mvneta_fill_shadowq(sq, mbuf);
+ /* Update descriptor with last segment */
+ mvneta_fill_desc(&descs[tail++], seg);
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+
+ ret = mvneta_prepare_proto_info(mbuf->ol_flags,
+ mbuf->packet_type,
+ &l3_type, &l4_type,
+ &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ neta_ppio_outq_desc_set_proto_info(&descs[tail_first],
+ l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+ num = total_descs;
+ neta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,
+ &pkts);
+
+ /* number of packets that were not sent */
+ if (unlikely(num > total_descs)) {
+ for (i = total_descs; i < num; i++) {
+ sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +
+ sq->head - 1) &
+ MRVL_NETA_TX_SHADOWQ_MASK;
+ addr = sq->ent[sq->head].cookie;
+ if (addr) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | addr);
+ bytes_sent -= rte_pktmbuf_pkt_len(mbuf);
+ }
+ }
+ sq->size -= num - total_descs;
+ nb_pkts = pkts.num;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
+/**
+ * Set tx burst function according to offload flag
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mvneta_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (priv->multiseg) {
+ MVNETA_LOG(INFO, "Using multi-segment tx callback");
+ dev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;
+ } else {
+ MVNETA_LOG(INFO, "Using single-segment tx callback");
+ dev->tx_pkt_burst = mvneta_tx_pkt_burst;
+ }
+}
+
+/**
+ * DPDK callback for receive.
+ *
+ * @param rxq
+ * Generic pointer to the receive queue.
+ * @param rx_pkts
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received.
+ */
+uint16_t
+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_rxq *q = rxq;
+ struct neta_ppio_desc descs[nb_pkts];
+ int i, ret, rx_done = 0, rx_dropped = 0;
+
+ if (unlikely(!q || !q->priv->ppio))
+ return 0;
+
+ ret = neta_ppio_recv(q->priv->ppio, q->queue_id,
+ descs, &nb_pkts);
+
+ if (unlikely(ret < 0)) {
+ MVNETA_LOG(ERR, "Failed to receive packets");
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf;
+ uint8_t l3_offset, l4_offset;
+ enum neta_inq_desc_status status;
+ uint64_t addr;
+
+ addr = cookie_addr_high |
+ neta_ppio_inq_desc_get_cookie(&descs[i]);
+ mbuf = (struct rte_mbuf *)addr;
+
+ rte_pktmbuf_reset(mbuf);
+
+ /* drop packet in case of mac, overrun or resource error */
+ status = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
+ if (unlikely(status != NETA_DESC_ERR_OK)) {
+ /* Release the mbuf to the mempool since
+ * it won't be transferred to tx path
+ */
+ rte_pktmbuf_free(mbuf);
+ q->drop_mac++;
+ rx_dropped++;
+ continue;
+ }
+
+ mbuf->data_off += MVNETA_PKT_EFFEC_OFFS;
+ mbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = q->port_id;
+ mbuf->packet_type =
+ mvneta_desc_to_packet_type_and_offset(&descs[i],
+ &l3_offset,
+ &l4_offset);
+ mbuf->l2_len = l3_offset;
+ mbuf->l3_len = l4_offset - l3_offset;
+
+ if (likely(q->cksum_enabled))
+ mbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);
+
+ rx_pkts[rx_done++] = mbuf;
+ q->bytes_recv += mbuf->pkt_len;
+ }
+ q->pkts_processed += rx_done + rx_dropped;
+
+ if (q->pkts_processed > rx_desc_free_thresh) {
+ int buf_to_refill = rx_desc_free_thresh;
+
+ ret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);
+ if (ret)
+ MVNETA_LOG(ERR, "Refill failed");
+ q->pkts_processed -= buf_to_refill;
+ }
+
+ return rx_done;
+}
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Thresholds parameters (unused).
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int
+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct mvneta_rxq *rxq;
+ uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
+ uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
+
+ if (frame_size < max_rx_pkt_len) {
+ MVNETA_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up "
+ "to %u bytes of data.",
+ buf_size + max_rx_pkt_len - frame_size,
+ max_rx_pkt_len);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ }
+
+ if (dev->data->rx_queues[idx]) {
+ rte_free(dev->data->rx_queues[idx]);
+ dev->data->rx_queues[idx] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+ if (!rxq)
+ return -ENOMEM;
+
+ rxq->priv = priv;
+ rxq->mp = mp;
+ rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->queue_id = idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->size = desc;
+ rx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));
+ priv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =
+ desc;
+
+ dev->data->rx_queues[idx] = rxq;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Transmit queue index.
+ * @param desc
+ * Number of descriptors to configure in the queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Tx queue configuration parameters.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int
+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct mvneta_txq *txq;
+
+ if (dev->data->tx_queues[idx]) {
+ rte_free(dev->data->tx_queues[idx]);
+ dev->data->tx_queues[idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->priv = priv;
+ txq->queue_id = idx;
+ txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
+ dev->data->tx_queues[idx] = txq;
+
+ priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
+ priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param txq
+ * Generic transmit queue pointer.
+ */
+void
+mvneta_tx_queue_release(void *txq)
+{
+ struct mvneta_txq *q = txq;
+
+ if (!q)
+ return;
+
+ rte_free(q);
+}
+
+/**
+ * Return mbufs to mempool.
+ *
+ * @param desc
+ * Array of rx descriptors.
+ * @param num
+ * Number of descriptors in the array.
+ */
+static void
+mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)
+{
+ uint64_t addr;
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (desc) {
+ addr = cookie_addr_high |
+ neta_ppio_inq_desc_get_cookie(desc);
+ if (addr)
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ desc++;
+ }
+ }
+}
+
+int
+mvneta_alloc_rx_bufs(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret = 0, i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mvneta_rxq *rxq = dev->data->rx_queues[i];
+ int num = rxq->size;
+
+ ret = mvneta_buffs_alloc(priv, rxq, &num);
+ if (ret || num != rxq->size) {
+ rte_free(rxq);
+ dev->data->rx_queues[i] = NULL;
+ return ret ? ret : -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Flush single receive queue.
+ *
+ * @param rxq
+ * Pointer to rx queue structure.
+ */
+static void
+mvneta_rx_queue_flush(struct mvneta_rxq *rxq)
+{
+ struct neta_ppio_desc *descs;
+ struct neta_buff_inf *bufs;
+ uint16_t num;
+ int ret, i;
+
+ descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);
+ bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);
+ if (!descs || !bufs) {
+ MVNETA_LOG(ERR, "Failed to allocate rx queue flush buffers");
+ goto out;
+ }
+
+ do {
+ num = MRVL_NETA_RXD_MAX;
+ ret = neta_ppio_recv(rxq->priv->ppio,
+ rxq->queue_id,
+ descs, &num);
+ mvneta_recv_buffs_free(descs, num);
+ } while (ret == 0 && num);
+
+ rxq->pkts_processed = 0;
+
+ num = MRVL_NETA_RXD_MAX;
+
+ neta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);
+ MVNETA_LOG(INFO, "freeing %u unused bufs.", num);
+
+ for (i = 0; i < num; i++) {
+ uint64_t addr;
+ if (bufs[i].cookie) {
+ addr = cookie_addr_high | bufs[i].cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+ }
+
+out:
+ rte_free(descs);
+ rte_free(bufs);
+}
+
+/**
+ * Flush single transmit queue.
+ *
+ * @param txq
+ * Pointer to tx queue structure
+ */
+static void
+mvneta_tx_queue_flush(struct mvneta_txq *txq)
+{
+ struct mvneta_shadow_txq *sq = &txq->shadow_txq;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(txq->priv->ppio, sq,
+ txq->queue_id);
+
+ /* free the rest of them */
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
+ sq->ent[sq->tail].cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
+}
+
+void
+mvneta_flush_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ MVNETA_LOG(INFO, "Flushing rx queues");
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mvneta_rxq *rxq = dev->data->rx_queues[i];
+
+ mvneta_rx_queue_flush(rxq);
+ }
+
+ MVNETA_LOG(INFO, "Flushing tx queues");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mvneta_txq *txq = dev->data->tx_queues[i];
+
+ mvneta_tx_queue_flush(txq);
+ }
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ *
+ * @param rxq
+ * Generic receive queue pointer.
+ */
+void
+mvneta_rx_queue_release(void *rxq)
+{
+ struct mvneta_rxq *q = rxq;
+
+ if (!q)
+ return;
+
+ /* If dev_stop was called already, mbufs are already
+ * returned to mempool and ppio is deinitialized.
+ * Skip this step.
+ */
+
+ if (q->priv->ppio)
+ mvneta_rx_queue_flush(q);
+
+ rte_free(rxq);
+}
+
+/**
+ * DPDK callback to get information about specific receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Receive queue index.
+ * @param qinfo
+ * Receive queue information structure.
+ */
+void
+mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];
+
+ qinfo->mp = q->mp;
+ qinfo->nb_desc = q->size;
+}
+
+/**
+ * DPDK callback to get information about specific transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param tx_queue_id
+ * Transmit queue index.
+ * @param qinfo
+ * Transmit queue information structure.
+ */
+void
+mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ qinfo->nb_desc =
+ priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+}
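
The cookie handling at the top of this file splits a 64-bit mbuf pointer into a per-descriptor low part (the MUSDK cookie) and a process-wide high part (cookie_addr_high). A standalone sketch of the round trip, assuming neta_cookie_t is a 32-bit integer as the sizeof-based shift suggests:

#include <stdint.h>

typedef uint32_t neta_cookie_t;	/* assumption: 32-bit MUSDK cookie */

#define COOKIE_SHIFT	(sizeof(neta_cookie_t) * 8)	/* 32 */
#define COOKIE_HI_MASK	(~0ULL << COOKIE_SHIFT)

static void *cookie_roundtrip(void *mbuf, uint64_t *high)
{
	uint64_t addr = (uint64_t)(uintptr_t)mbuf;
	neta_cookie_t cookie;

	/* Refill/tx path: latch the high bits from the first buffer,
	 * keep only the low bits in the descriptor. */
	if (*high == ~0ULL)
		*high = addr & COOKIE_HI_MASK;
	cookie = (neta_cookie_t)addr;

	/* Rx/completion path: rebuild the pointer by or-ing the latched
	 * high bits back onto the cookie. */
	return (void *)(uintptr_t)(*high | cookie);
}
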
diff --git a/drivers/net/mvneta/mvneta_rxtx.h b/drivers/net/mvneta/mvneta_rxtx.h
new file mode 100644
index 00000000..cc291901
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_rxtx.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MVNETA_RXTX_H_
+#define _MVNETA_RXTX_H_
+
+#include "mvneta_ethdev.h"
+
+int mvneta_alloc_rx_bufs(struct rte_eth_dev *dev);
+
+void mvneta_flush_queues(struct rte_eth_dev *dev);
+
+void mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+void mvneta_set_tx_function(struct rte_eth_dev *dev);
+
+uint16_t
+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+int
+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf __rte_unused,
+ struct rte_mempool *mp);
+int
+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+
+void mvneta_rx_queue_release(void *rxq);
+void mvneta_tx_queue_release(void *txq);
+
+#endif /* _MVNETA_RXTX_H_ */
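
The burst functions declared here are reached through the generic API: rte_eth_rx_burst() ends up in mvneta_rx_pkt_burst(), and rte_eth_tx_burst() in whichever callback mvneta_set_tx_function() installed. A sketch of a simple forwarding loop over a single queue, assuming the port was configured and started as in the earlier example:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void mvneta_fwd_loop(uint16_t port_id)
{
	struct rte_mbuf *pkts[32];

	for (;;) {
		uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
		/* Tx may accept fewer packets than offered when the
		 * shadow queue is nearly full; free the remainder. */
		uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
}
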
diff --git a/drivers/net/mvneta/rte_pmd_mvneta_version.map b/drivers/net/mvneta/rte_pmd_mvneta_version.map
new file mode 100644
index 00000000..24bd5cdb
--- /dev/null
+++ b/drivers/net/mvneta/rte_pmd_mvneta_version.map
@@ -0,0 +1,3 @@
+DPDK_18.11 {
+ local: *;
+};