Diffstat (limited to 'drivers/net/dpaa/dpaa_ethdev.c')
-rw-r--r--	drivers/net/dpaa/dpaa_ethdev.c	| 213
1 file changed, 155 insertions(+), 58 deletions(-)
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9b69ef45..7a950ac0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,14 +45,43 @@
 #include <fsl_bman.h>
 #include <fsl_fman.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
-/* At present we only allow up to 4 push mode queues - as each of this queue
- * need dedicated portal and we are short of portals.
+static int default_q;	/* use default queue - FMC is not executed*/
+/* At present we only allow up to 4 push mode queues as default - as each of
+ * this queue need dedicated portal and we are short of portals.
  */
-#define DPAA_MAX_PUSH_MODE_QUEUE       4
+#define DPAA_MAX_PUSH_MODE_QUEUE       8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
 
-static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
 static int dpaa_push_queue_idx;	/* Queue index which are in push mode*/
@@ -95,6 +124,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -122,9 +154,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
 		return -EINVAL;
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads &=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -134,13 +168,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	/* Rx offloads validation */
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
+
+	/* Tx offloads validation */
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -256,14 +309,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
 				ETH_LINK_SPEED_10G);
-	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
-	dev_info->tx_offload_capa =
-		(DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
+	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -275,9 +326,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	if (dpaa_intf->fif->mac_type == fman_mac_1g)
-		link->link_speed = 1000;
+		link->link_speed = ETH_SPEED_NUM_1G;
 	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
-		link->link_speed = 10000;
+		link->link_speed = ETH_SPEED_NUM_10G;
 	else
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, dpaa_intf->fif->mac_type);
@@ -316,12 +367,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
 
-	if (xstats == NULL)
-		return 0;
-
 	if (n < num)
 		return num;
 
+	if (xstats == NULL)
+		return 0;
+
 	fman_if_stats_get_all(dpaa_intf->fif, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
@@ -335,10 +386,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 static int
 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 		      struct rte_eth_xstat_name *xstats_names,
-		      __rte_unused unsigned int limit)
+		      unsigned int limit)
 {
 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 
+	if (limit < stat_cnt)
+		return stat_cnt;
+
 	if (xstats_names != NULL)
 		for (i = 0; i < stat_cnt; i++)
 			snprintf(xstats_names[i].name,
@@ -366,7 +420,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 			return 0;
 
 		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
-				      sizeof(struct dpaa_if_stats));
+				      sizeof(struct dpaa_if_stats) / 8);
 
 		for (i = 0; i < stat_cnt; i++)
 			values[i] =
@@ -463,7 +517,15 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
-	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+	if (queue_idx >= dev->data->nb_rx_queues) {
+		rte_errno = EOVERFLOW;
+		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
+		return -rte_errno;
+	}
+
+	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+			queue_idx, rxq->fqid);
 
 	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
 		struct fman_if_ic_params icp;
@@ -527,9 +589,11 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
 		}
 		ret = qman_init_fq(rxq, flags, &opts);
-		if (ret)
-			DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
-				     " ret: %d", rxq->fqid, ret);
+		if (ret) {
+			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+			return ret;
+		}
 		rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
 		rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
 		rxq->is_static = true;
@@ -553,7 +617,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
-int __rte_experimental
+int
 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id,
 		u16 ch_id,
@@ -604,8 +668,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 
 	ret = qman_init_fq(rxq, flags, &opts);
 	if (ret) {
-		DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
-			     rxq->fqid, ret);
+		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
 		return ret;
 	}
 
@@ -616,7 +680,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 	return ret;
 }
 
-int __rte_experimental
+int
 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id)
 {
@@ -662,7 +726,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
-	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+	if (queue_idx >= dev->data->nb_tx_queues) {
+		rte_errno = EOVERFLOW;
+		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, queue_idx, dev->data->nb_tx_queues);
+		return -rte_errno;
+	}
+
+	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+			queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
 	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
 	return 0;
 }
@@ -813,7 +885,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
 	fman_if_clear_mac_addr(dpaa_intf->fif, index);
 }
 
-static void
+static int
 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 		       struct ether_addr *addr)
 {
@@ -825,6 +897,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
 	if (ret)
 		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+
+	return ret;
 }
 
 static struct eth_dev_ops dpaa_devops = {
@@ -882,7 +956,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_dpaa_pmd);
 }
 
-int __rte_experimental
+int
 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
 {
 	struct rte_eth_dev *dev;
@@ -953,15 +1027,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 
 	ret = qman_reserve_fqid(fqid);
 	if (ret) {
-		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
 			     fqid, ret);
 		return -EINVAL;
 	}
 
-	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
 	if (ret) {
-		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
 			     fqid, ret);
 		return ret;
 	}
@@ -977,7 +1051,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 					 &cgr_opts);
 		if (ret) {
 			DPAA_PMD_WARN(
-				"rx taildrop init fail on rx fqid %d (ret=%d)",
+				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
 				fqid, ret);
 			goto without_cgr;
 		}
@@ -988,7 +1062,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 without_cgr:
 	ret = qman_init_fq(fq, flags, &opts);
 	if (ret)
-		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
 
 	return ret;
 }
@@ -1016,10 +1090,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
 	/* no tx-confirmation */
 	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
 	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
-	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+	DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 	if (ret)
-		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+		DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
 	return ret;
 }
 
@@ -1090,25 +1164,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->cfg = cfg;
 
 	/* Initialize Rx FQ's */
-	if (getenv("DPAA_NUM_RX_QUEUES"))
-		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
-	else
+	if (default_q) {
 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
-
-	/* if push mode queues to be enabled. Currenly we are allowing only
-	 * one queue per thread.
-	 */
-	if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
-		dpaa_push_mode_max_queue =
-				atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
-		if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
-			dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+	} else {
+		if (getenv("DPAA_NUM_RX_QUEUES"))
+			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+		else
+			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
 	}
 
-	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
+
+	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
 	 * queues.
 	 */
-	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
 		return -EINVAL;
 	}
@@ -1141,8 +1210,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	for (loop = 0; loop < num_rx_fqs; loop++) {
-		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
-			DPAA_PCD_FQID_MULTIPLIER + loop;
+		if (default_q)
+			fqid = cfg->rx_def;
+		else
+			fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+				DPAA_PCD_FQID_MULTIPLIER + loop;
 
 		if (dpaa_intf->cgr_rx)
 			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1317,6 +1389,9 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
 		if (!eth_dev)
 			return -ENOMEM;
+		eth_dev->device = &dpaa_dev->device;
+		eth_dev->dev_ops = &dpaa_devops;
+		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
 	}
 
@@ -1335,6 +1410,26 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 			return ret;
 		}
 
+		if (access("/tmp/fmc.bin", F_OK) == -1) {
+			RTE_LOG(INFO, PMD,
+				"* FMC not configured.Enabling default mode\n");
+			default_q = 1;
+		}
+
+		/* disabling the default push mode for LS1043 */
+		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+			dpaa_push_mode_max_queue = 0;
+
+		/* if push mode queues to be enabled. Currenly we are allowing
+		 * only one queue per thread.
+		 */
+		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+			dpaa_push_mode_max_queue =
+					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+				dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+		}
+
 		is_global_init = 1;
 	}
 
@@ -1366,8 +1461,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 
 	/* Invoke PMD device initialization function */
 	diag = dpaa_dev_init(eth_dev);
-	if (diag == 0)
+	if (diag == 0) {
+		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
+	}
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		rte_free(eth_dev->data->dev_private);
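
For context only (not part of the commit): a minimal sketch of how an application consumes the capability reporting reworked above. It reads rx_offload_capa/tx_offload_capa via rte_eth_dev_info_get() and requests per-port offloads through the rxmode/txmode offloads bitmaps, which dpaa_eth_dev_configure() then checks against dev_rx_offloads_nodis and dev_tx_offloads_nodis. The helper name configure_port_offloads(), the single Rx/Tx queue counts, and the 9600-byte max frame length are illustrative placeholders; the API names follow the DPDK 18.05-era ethdev offload API that this patch targets.

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative helper (hypothetical name): enable jumbo frames only if the
 * PMD advertises them, and request one checksum offload from the
 * "cannot be disabled" set.
 */
static int
configure_port_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* 18.05-era API: take offload requests from the offloads bitmaps
	 * rather than the legacy rxmode bit-fields.
	 */
	port_conf.rxmode.ignore_offload_bitfield = 1;

	/* DPAA reports dev_rx_offloads_sup | dev_rx_offloads_nodis here,
	 * so jumbo frames show up as supported.
	 */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
		port_conf.rxmode.max_rx_pkt_len = 9600; /* placeholder */
	}

	/* Checksum offloads sit in the non-disable set for DPAA; requesting
	 * them is harmless, while omitting them merely triggers the
	 * DPAA_PMD_WARN added in dpaa_eth_dev_configure().
	 */
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;

	/* One Rx and one Tx queue, for brevity. */
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}

The patch folds the always-on offloads into rx_offload_capa/tx_offload_capa precisely so that a capability check like the one above succeeds even for offloads the hardware cannot turn off.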