Diffstat (limited to 'drivers/net/mrvl/mrvl_ethdev.c')
 drivers/net/mrvl/mrvl_ethdev.c | 565 ++++++++++++++++++++-----------
 1 file changed, 391 insertions(+), 174 deletions(-)
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 29361652..705c4bd8 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -32,7 +32,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
@@ -47,10 +47,6 @@
#undef container_of
#endif
-#include <drivers/mv_pp2.h>
-#include <drivers/mv_pp2_bpool.h>
-#include <drivers/mv_pp2_hif.h>
-
#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
@@ -98,6 +94,17 @@
/* Memory size (in bytes) for MUSDK dma buffers */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+/** Port Rx offload capabilities */
+#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offload capabilities */
+#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
MRVL_CFG_ARG,
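The two capability masks above replace the open-coded lists previously kept in mrvl_dev_infos_get(). As a minimal sketch (not part of this patch, needs rte_ethdev.h), this is how an application would request a subset of them under the offloads-bitmap API the patch migrates to; the port id and queue counts are arbitrary example values:

	/* sketch: configure one port with a subset of the masks above */
	static int
	configure_port(uint16_t port_id)
	{
		struct rte_eth_conf conf = {
			.rxmode = {
				.offloads = DEV_RX_OFFLOAD_CRC_STRIP |
					    DEV_RX_OFFLOAD_CHECKSUM,
			},
			.txmode = {
				.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
					    DEV_TX_OFFLOAD_TCP_CKSUM,
			},
		};

		/* 1 rx queue and 1 tx queue, for illustration only */
		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}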
@@ -115,6 +122,11 @@ struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+struct mrvl_ifnames {
+ const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ int idx;
+};
+
/*
 * To use buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for bookkeeping of buffer information.
@@ -149,21 +161,17 @@ struct mrvl_txq {
int queue_id;
int port_id;
uint64_t bytes_sent;
+ struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
};
-/*
- * Every tx queue should have dedicated shadow tx queue.
- *
- * Ports assigned by DPDK might not start at zero or be continuous so
- * as a workaround define shadow queues for each possible port so that
- * we eventually fit somewhere.
- */
-struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
-
-/** Number of ports configured. */
-int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;
+static int mrvl_dev_num;
+
+static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
+static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
+ struct pp2_hif *hif, unsigned int core_id,
+ struct mrvl_shadow_txq *sq, int qid, int force);
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
@@ -190,6 +198,59 @@ mrvl_reserve_bit(int *bitmap, int max)
return n;
}
+static int
+mrvl_init_hif(int core_id)
+{
+ struct pp2_hif_params params;
+ char match[MRVL_MATCH_LEN];
+ int ret;
+
+ ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ return ret;
+ }
+
+ snprintf(match, sizeof(match), "hif-%d", ret);
+ memset(&params, 0, sizeof(params));
+ params.match = match;
+ params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
+ ret = pp2_hif_init(&params, &hifs[core_id]);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct pp2_hif*
+mrvl_get_hif(struct mrvl_priv *priv, int core_id)
+{
+ int ret;
+
+ if (likely(hifs[core_id] != NULL))
+ return hifs[core_id];
+
+ rte_spinlock_lock(&priv->lock);
+
+ ret = mrvl_init_hif(core_id);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ goto out;
+ }
+
+ if (core_id < mrvl_lcore_first)
+ mrvl_lcore_first = core_id;
+
+ if (core_id > mrvl_lcore_last)
+ mrvl_lcore_last = core_id;
+out:
+ rte_spinlock_unlock(&priv->lock);
+
+ return hifs[core_id];
+}
+
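mrvl_init_hif() and mrvl_get_hif() above implement lazy, per-lcore creation of MUSDK hif handles: the fast path reads hifs[core_id] without locking, and only first-time creation takes priv->lock. Because each slot is written by exactly one lcore, no re-check is needed after acquiring the lock. The generic shape of the pattern, with a hypothetical resource type and constructor:

	struct resource;                                      /* hypothetical */
	struct resource *create_resource(unsigned int core);  /* hypothetical */

	static struct resource *slots[RTE_MAX_LCORE];
	static rte_spinlock_t slots_lock = RTE_SPINLOCK_INITIALIZER;

	static struct resource *
	get_slot(unsigned int core_id)
	{
		if (likely(slots[core_id] != NULL))
			return slots[core_id];  /* lock-free fast path */

		rte_spinlock_lock(&slots_lock);
		/* on failure the slot stays NULL and callers bail out */
		slots[core_id] = create_resource(core_id);
		rte_spinlock_unlock(&slots_lock);

		return slots[core_id];
	}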
/**
 * Configure RSS based on the DPDK RSS configuration.
*
@@ -252,13 +313,13 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
RTE_LOG(INFO, PMD,
"L2 CRC stripping is always enabled in hw\n");
- dev->data->dev_conf.rxmode.hw_strip_crc = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
return -EINVAL;
}
@@ -268,17 +329,17 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
RTE_LOG(INFO, PMD, "LRO not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.jumbo_frame)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN;
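As a worked example: with ETHER_HDR_LEN (14) and ETHER_CRC_LEN (4), a max_rx_pkt_len of 9018 bytes yields dev->data->mtu = 9018 - 14 - 4 = 9000.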
@@ -328,6 +389,9 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
return -EINVAL;
+ if (!priv->ppio)
+ return 0;
+
ret = pp2_ppio_set_mru(priv->ppio, mru);
if (ret)
return ret;
@@ -350,6 +414,9 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return -EPERM;
+
ret = pp2_ppio_enable(priv->ppio);
if (ret)
return ret;
@@ -365,8 +432,6 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
if (ret)
pp2_ppio_disable(priv->ppio);
- dev->data->dev_link.link_status = ETH_LINK_UP;
-
return ret;
}
@@ -383,15 +448,11 @@ static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
- int ret;
- ret = pp2_ppio_disable(priv->ppio);
- if (ret)
- return ret;
+ if (!priv->ppio)
+ return -EPERM;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- return ret;
+ return pp2_ppio_disable(priv->ppio);
}
/**
@@ -408,20 +469,13 @@ mrvl_dev_start(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
char match[MRVL_MATCH_LEN];
- int ret;
+ int ret = 0, def_init_size;
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
priv->ppio_params.match = match;
/*
- * Calculate the maximum bpool size for refill feature to 1.5 of the
- * configured size. In case the bpool size will exceed this value,
- * superfluous buffers will be removed
- */
- priv->bpool_max_size = priv->bpool_init_size +
- (priv->bpool_init_size >> 1);
- /*
* Calculate the minimum bpool size for refill feature as follows:
 * 2 default burst sizes multiplied by the number of rx queues.
 * If the bpool size drops below this value, new buffers will
@@ -429,9 +483,34 @@ mrvl_dev_start(struct rte_eth_dev *dev)
*/
priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
+	/* In case the initial bpool size configured during queue setup is
+	 * smaller than the minimum size, add more buffers
+	 */
+ def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
+ if (priv->bpool_init_size < def_init_size) {
+ int buffs_to_add = def_init_size - priv->bpool_init_size;
+
+ priv->bpool_init_size += buffs_to_add;
+ ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ }
+
+ /*
+	 * Calculate the maximum bpool size for the refill feature as follows:
+	 * the maximum number of descriptors in an rx queue multiplied by the
+	 * number of rx queues, plus the minimum bpool size.
+	 * If the bpool size exceeds this value, superfluous buffers will
+	 * be removed
+ */
+ priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
+ priv->bpool_min_size;
+
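Worked numbers, assuming the driver's MRVL_BURST_SIZE of 64 and MRVL_PP2_RXD_MAX of 2048 (treat the constants as illustrative): with 4 rx queues, bpool_min_size = 4 * 64 * 2 = 512 buffers, def_init_size = 512 + 2 * 64 = 640, and bpool_max_size = 4 * 2048 + 512 = 8704 buffers.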
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
- if (ret)
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to init ppio\n");
return ret;
+ }
/*
 * In case there are some stale uc/mc mac addresses, flush them
@@ -466,17 +545,20 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- pp2_ppio_deinit(priv->ppio);
- return ret;
+ RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
- if (ret)
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to set link up\n");
goto out;
+ }
return 0;
out:
+ RTE_LOG(ERR, PMD, "Failed to start device\n");
pp2_ppio_deinit(priv->ppio);
return ret;
}
@@ -518,21 +600,32 @@ mrvl_flush_rx_queues(struct rte_eth_dev *dev)
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
- int i;
+ int i, j;
+ struct mrvl_txq *txq;
RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- struct mrvl_shadow_txq *sq =
- &shadow_txqs[dev->data->port_id][i];
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct mrvl_txq *)dev->data->tx_queues[i];
- while (sq->tail != sq->head) {
- uint64_t addr = cookie_addr_high |
+ for (j = 0; j < RTE_MAX_LCORE; j++) {
+ struct mrvl_shadow_txq *sq;
+
+ if (!hifs[j])
+ continue;
+
+ sq = &txq->shadow_txqs[j];
+ mrvl_free_sent_buffers(txq->priv->ppio,
+ hifs[j], j, sq, txq->queue_id, 1);
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
sq->ent[sq->tail].buff.cookie;
- rte_pktmbuf_free((struct rte_mbuf *)addr);
- sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ rte_pktmbuf_free(
+ (struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
}
-
- memset(sq, 0, sizeof(*sq));
}
}
@@ -546,8 +639,15 @@ static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_hif *hif;
uint32_t num;
int ret;
+ unsigned int core_id = rte_lcore_id();
+
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(priv, core_id);
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
@@ -559,8 +659,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
struct pp2_buff_inf inf;
uint64_t addr;
- ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
- &inf);
+ ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
if (ret)
break;
@@ -583,8 +682,10 @@ mrvl_dev_stop(struct rte_eth_dev *dev)
mrvl_dev_set_link_down(dev);
mrvl_flush_rx_queues(dev);
mrvl_flush_tx_shadow_queues(dev);
- if (priv->qos_tbl)
+ if (priv->qos_tbl) {
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
+ priv->qos_tbl = NULL;
+ }
pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
}
@@ -632,9 +733,13 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
* TODO
* once MUSDK provides necessary API use it here
*/
+ struct mrvl_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata;
struct ifreq req;
- int ret, fd;
+ int ret, fd, link_up;
+
+ if (!priv->ppio)
+ return -EPERM;
edata.cmd = ETHTOOL_GSET;
@@ -674,6 +779,8 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ETH_LINK_HALF_DUPLEX;
dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
ETH_LINK_FIXED;
+ pp2_ppio_get_link_state(priv->ppio, &link_up);
+ dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
return 0;
}
@@ -690,7 +797,10 @@ mrvl_promiscuous_enable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
- ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}
@@ -707,6 +817,9 @@ mrvl_allmulticast_enable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
@@ -724,7 +837,10 @@ mrvl_promiscuous_disable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
- ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}
@@ -741,6 +857,9 @@ mrvl_allmulticast_disable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
@@ -761,6 +880,9 @@ mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
char buf[ETHER_ADDR_FMT_SIZE];
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_remove_mac_addr(priv->ppio,
dev->data->mac_addrs[index].addr_bytes);
if (ret) {
@@ -797,6 +919,9 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
/* For setting index 0, mrvl_mac_addr_set() should be used.*/
return -1;
+ if (!priv->ppio)
+ return 0;
+
/*
* Maximum number of uc addresses can be tuned via kernel module mvpp2x
* parameter uc_filter_max. Maximum number of mc addresses is then
@@ -832,15 +957,17 @@ static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
- pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
- /*
- * TODO
- * Port stops sending packets if pp2_ppio_set_mac_addr()
- * was called after pp2_ppio_enable(). As a quick fix issue
- * enable port once again.
- */
- pp2_ppio_enable(priv->ppio);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ }
}
/**
@@ -862,6 +989,9 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
uint64_t drop_mac = 0;
unsigned int i, idx, ret;
+ if (!priv->ppio)
+ return -EPERM;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct mrvl_rxq *rxq = dev->data->rx_queues[i];
struct pp2_ppio_inq_statistics rx_stats;
@@ -954,6 +1084,9 @@ mrvl_stats_reset(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int i;
+ if (!priv->ppio)
+ return;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct mrvl_rxq *rxq = dev->data->rx_queues[i];
@@ -1002,15 +1135,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
- info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ info->rx_offload_capa = MRVL_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
- info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ info->tx_offload_capa = MRVL_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
info->flow_type_rss_offloads = ETH_RSS_IPV4 |
ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1018,6 +1147,7 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
+ info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}
@@ -1110,6 +1240,9 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ if (!priv->ppio)
+ return -EPERM;
+
return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}
@@ -1131,9 +1264,19 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
int i, ret;
- unsigned int core_id = rte_lcore_id();
- struct pp2_hif *hif = hifs[core_id];
- struct pp2_bpool *bpool = rxq->priv->bpool;
+ unsigned int core_id;
+ struct pp2_hif *hif;
+ struct pp2_bpool *bpool;
+
+ core_id = rte_lcore_id();
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(rxq->priv, core_id);
+ if (!hif)
+ return -1;
+
+ bpool = rxq->priv->bpool;
ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
if (ret)
@@ -1173,6 +1316,42 @@ out:
}
/**
+ * Check whether requested rx queue offloads match port offloads.
+ *
+ * @param dev
+ *   Pointer to the device.
+ * @param requested
+ *   Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = MRVL_RX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
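The two masks computed above partition a bad request into "asked for something the PMD cannot do" and "failed to repeat a mandatory port-level offload". A worked example with hypothetical values:

	uint64_t supported = MRVL_RX_OFFLOADS; /* VLAN_FILTER|JUMBO|CRC_STRIP|CHECKSUM */
	uint64_t mandatory = DEV_RX_OFFLOAD_CRC_STRIP | DEV_RX_OFFLOAD_CHECKSUM;
	uint64_t requested = DEV_RX_OFFLOAD_CRC_STRIP | DEV_RX_OFFLOAD_TCP_LRO;

	uint64_t unsupported = requested & ~supported; /* == TCP_LRO  -> rejected */
	uint64_t missing = mandatory & ~requested;     /* == CHECKSUM -> rejected */

A queue request passes only when it is a superset of the port-level offloads and a subset of the PMD capabilities.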
+/**
* DPDK callback to configure the receive queue.
*
* @param dev
@@ -1184,7 +1363,7 @@ out:
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused_).
+ * Thresholds parameters.
* @param mp
* Memory pool for buffer allocations.
*
@@ -1194,7 +1373,7 @@ out:
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_rxconf *conf __rte_unused,
+ const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
struct mrvl_priv *priv = dev->data->dev_private;
@@ -1203,6 +1382,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
* Unknown TC mapping, mapping will not have a correct queue.
@@ -1234,7 +1416,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->cksum_enabled =
+ dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1269,8 +1452,15 @@ mrvl_rx_queue_release(void *rxq)
struct mrvl_rxq *q = rxq;
struct pp2_ppio_tc_params *tc_params;
int i, num, tc, inq;
+ struct pp2_hif *hif;
+ unsigned int core_id = rte_lcore_id();
 	if (!q)
+		return;
+
+	if (core_id == LCORE_ID_ANY)
+		core_id = 0;
+
+	hif = mrvl_get_hif(q->priv, core_id);
+	if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
@@ -1281,7 +1471,7 @@ mrvl_rx_queue_release(void *rxq)
struct pp2_buff_inf inf;
uint64_t addr;
- pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
+ pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
addr = cookie_addr_high | inf.cookie;
rte_pktmbuf_free((struct rte_mbuf *)addr);
}
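The `cookie_addr_high | inf.cookie` idiom above (also used in the rx/tx burst paths) rebuilds a full mbuf pointer from a bpool cookie: the cookie carries only the low address bits, and cookie_addr_high caches the upper bits assumed common to all mbufs. An illustration with made-up values, for a 64-bit target:

	uint64_t cookie_addr_high = 0x0000ffff00000000ULL; /* made-up high bits */
	uint32_t cookie = 0x12345000;                      /* made-up low bits */
	struct rte_mbuf *mbuf =
		(struct rte_mbuf *)(cookie_addr_high | cookie);
	rte_pktmbuf_free(mbuf);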
@@ -1290,6 +1480,42 @@ mrvl_rx_queue_release(void *rxq)
}
/**
+ * Check whether requested tx queue offloads match port offloads.
+ *
+ * @param
+ * dev Pointer to the device.
+ * @param
+ * requested Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = MRVL_TX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
* DPDK callback to configure the transmit queue.
*
* @param dev
@@ -1301,7 +1527,7 @@ mrvl_rx_queue_release(void *rxq)
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused).
+ * Thresholds parameters.
*
* @return
* 0 on success, negative error value otherwise.
@@ -1309,11 +1535,14 @@ mrvl_rx_queue_release(void *rxq)
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_txconf *conf __rte_unused)
+ const struct rte_eth_txconf *conf)
{
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
+ if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
@@ -1557,9 +1786,12 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct pp2_bpool *bpool;
int i, ret, rx_done = 0;
int num;
+ struct pp2_hif *hif;
unsigned int core_id = rte_lcore_id();
- if (unlikely(!q->priv->ppio))
+ hif = mrvl_get_hif(q->priv, core_id);
+
+ if (unlikely(!q->priv->ppio || !hif))
return 0;
bpool = q->priv->bpool;
@@ -1602,7 +1834,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
.cookie = (pp2_cookie_t)(uint64_t)mbuf,
};
- pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
+ pp2_bpool_put_buff(hif, bpool, &binf);
mrvl_port_bpool_size
[bpool->pp2_id][bpool->id][core_id]++;
q->drop_mac++;
@@ -1648,14 +1880,15 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
q->priv->bpool_init_size);
for (i = 0; i < pkt_to_remove; i++) {
- pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
+ ret = pp2_bpool_get_buff(hif, bpool, &buff);
+ if (ret)
+ break;
mbuf = (struct rte_mbuf *)
(cookie_addr_high | buff.cookie);
rte_pktmbuf_free(mbuf);
}
mrvl_port_bpool_size
- [bpool->pp2_id][bpool->id][core_id] -=
- pkt_to_remove;
+ [bpool->pp2_id][bpool->id][core_id] -= i;
}
rte_spinlock_unlock(&q->priv->lock);
}
@@ -1740,11 +1973,12 @@ mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
*/
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
- struct mrvl_shadow_txq *sq, int qid, int force)
+ unsigned int core_id, struct mrvl_shadow_txq *sq,
+ int qid, int force)
{
struct buff_release_entry *entry;
uint16_t nb_done = 0, num = 0, skip_bufs = 0;
- int i, core_id = rte_lcore_id();
+ int i;
pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
@@ -1791,6 +2025,7 @@ skip:
sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size -= num;
num = 0;
+ skip_bufs = 0;
}
if (likely(num)) {
@@ -1817,18 +2052,23 @@ static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct mrvl_txq *q = txq;
- struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
- struct pp2_hif *hif = hifs[rte_lcore_id()];
+ struct mrvl_shadow_txq *sq;
+ struct pp2_hif *hif;
struct pp2_ppio_desc descs[nb_pkts];
+ unsigned int core_id = rte_lcore_id();
int i, ret, bytes_sent = 0;
uint16_t num, sq_free_size;
uint64_t addr;
- if (unlikely(!q->priv->ppio))
+ hif = mrvl_get_hif(q->priv, core_id);
+ sq = &q->shadow_txqs[core_id];
+
+ if (unlikely(!q->priv->ppio || !hif))
return 0;
if (sq->size)
- mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);
+ mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
+ sq, q->queue_id, 0);
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
if (unlikely(nb_pkts > sq_free_size)) {
@@ -1856,8 +2096,9 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sq->ent[sq->head].buff.addr =
rte_mbuf_data_iova_default(mbuf);
sq->ent[sq->head].bpool =
- (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
- NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+ (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
+ mbuf->refcnt > 1)) ? NULL :
+ mrvl_port_to_bpool_lookup[mbuf->port];
sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size++;
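The shadow queue is a power-of-two ring: head marks where the next queued descriptor's bookkeeping entry goes, tail the oldest entry not yet reclaimed, and the AND with MRVL_PP2_TX_SHADOWQ_MASK (size - 1) implements wrap-around. A minimal sketch of the same arithmetic, assuming a hypothetical ring size of 512:

	#define RING_SIZE 512u              /* must be a power of two */
	#define RING_MASK (RING_SIZE - 1)

	struct ring {
		unsigned int head, tail, size;
	};

	/* mirrors sq_free_size above: one slot is held back as headroom */
	static unsigned int
	ring_free(const struct ring *r)
	{
		return RING_SIZE - r->size - 1;
	}

	static void
	ring_push(struct ring *r)
	{
		r->head = (r->head + 1) & RING_MASK;
		r->size++;
	}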
@@ -2034,6 +2275,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->dev_private = priv;
eth_dev->device = &vdev->device;
eth_dev->dev_ops = &mrvl_ops;
@@ -2067,6 +2309,7 @@ mrvl_eth_dev_destroy(const char *name)
priv = eth_dev->data->dev_private;
pp2_bpool_deinit(priv->bpool);
+ used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
rte_free(priv);
rte_free(eth_dev->data->mac_addrs);
rte_eth_dev_release_port(eth_dev);
@@ -2090,41 +2333,9 @@ static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
void *extra_args)
{
- const char **ifnames = extra_args;
-
- ifnames[mrvl_ports_nb++] = value;
-
- return 0;
-}
-
-/**
- * Initialize per-lcore MUSDK hardware interfaces (hifs).
- *
- * @return
- * 0 on success, negative error value otherwise.
- */
-static int
-mrvl_init_hifs(void)
-{
- struct pp2_hif_params params;
- char match[MRVL_MATCH_LEN];
- int i, ret;
+ struct mrvl_ifnames *ifnames = extra_args;
- RTE_LCORE_FOREACH(i) {
- ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
- if (ret < 0)
- return ret;
-
- snprintf(match, sizeof(match), "hif-%d", ret);
- memset(&params, 0, sizeof(params));
- params.match = match;
- params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
- ret = pp2_hif_init(&params, &hifs[i]);
- if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
- return ret;
- }
- }
+ ifnames->names[ifnames->idx++] = value;
return 0;
}
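For reference, the callback above is driven once per matching key. Assuming a vdev argument string such as net_mrvl,iface=eth0,iface=eth2 (interface names are illustrative), the probe path collects both values like so:

	struct mrvl_ifnames ifnames = { .idx = 0 };

	/* invokes mrvl_get_ifnames() for each "iface=..." pair, so
	 * ifnames.names[] becomes { "eth0", "eth2" } and ifnames.idx == 2
	 */
	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);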
@@ -2137,19 +2348,12 @@ mrvl_deinit_hifs(void)
{
int i;
- RTE_LCORE_FOREACH(i) {
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
if (hifs[i])
pp2_hif_deinit(hifs[i]);
}
-}
-
-static void mrvl_set_first_last_cores(int core_id)
-{
- if (core_id < mrvl_lcore_first)
- mrvl_lcore_first = core_id;
-
- if (core_id > mrvl_lcore_last)
- mrvl_lcore_last = core_id;
+ used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+ memset(hifs, 0, sizeof(hifs));
}
/**
@@ -2165,9 +2369,9 @@ static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
struct rte_kvargs *kvlist;
- const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ struct mrvl_ifnames ifnames;
int ret = -EINVAL;
- uint32_t i, ifnum, cfgnum, core_id;
+ uint32_t i, ifnum, cfgnum;
const char *params;
params = rte_vdev_device_args(vdev);
@@ -2179,21 +2383,34 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
- if (ifnum > RTE_DIM(ifnames))
+ if (ifnum > RTE_DIM(ifnames.names))
goto out_free_kvlist;
+ ifnames.idx = 0;
rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
mrvl_get_ifnames, &ifnames);
- cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
- goto out_free_kvlist;
- } else if (cfgnum == 1) {
- rte_kvargs_process(kvlist, MRVL_CFG_ARG,
- mrvl_get_qoscfg, &mrvl_qos_cfg);
+
+ /*
+	 * The system initialization below should be done only once,
+	 * when the first configuration file is provided
+ */
+ if (!mrvl_qos_cfg) {
+ cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+ RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ if (cfgnum > 1) {
+ RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ goto out_free_kvlist;
+ } else if (cfgnum == 1) {
+ rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+ mrvl_get_qoscfg, &mrvl_qos_cfg);
+ }
}
+ if (mrvl_dev_num)
+ goto init_devices;
+
+ RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
@@ -2213,37 +2430,33 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
goto out_deinit_dma;
}
- ret = mrvl_init_hifs();
- if (ret)
- goto out_deinit_hifs;
+ memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+ memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
+ mrvl_lcore_first = RTE_MAX_LCORE;
+ mrvl_lcore_last = 0;
+
+init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
- ret = mrvl_eth_dev_create(vdev, ifnames[i]);
+ RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
}
+ mrvl_dev_num += ifnum;
rte_kvargs_free(kvlist);
- memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
-
- mrvl_lcore_first = RTE_MAX_LCORE;
- mrvl_lcore_last = 0;
-
- RTE_LCORE_FOREACH(core_id) {
- mrvl_set_first_last_cores(core_id);
- }
-
return 0;
out_cleanup:
for (; i > 0; i--)
- mrvl_eth_dev_destroy(ifnames[i]);
-out_deinit_hifs:
- mrvl_deinit_hifs();
- mrvl_deinit_pp2();
+		mrvl_eth_dev_destroy(ifnames.names[i - 1]);
+
+ if (mrvl_dev_num == 0)
+ mrvl_deinit_pp2();
out_deinit_dma:
- mv_sys_dma_mem_destroy();
+ if (mrvl_dev_num == 0)
+ mv_sys_dma_mem_destroy();
out_free_kvlist:
rte_kvargs_free(kvlist);
@@ -2276,11 +2489,15 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
rte_eth_dev_get_name_by_port(i, ifname);
mrvl_eth_dev_destroy(ifname);
+ mrvl_dev_num--;
}
- mrvl_deinit_hifs();
- mrvl_deinit_pp2();
- mv_sys_dma_mem_destroy();
+ if (mrvl_dev_num == 0) {
+ RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ mrvl_deinit_hifs();
+ mrvl_deinit_pp2();
+ mv_sys_dma_mem_destroy();
+ }
return 0;
}