author		Luca Boccassi <luca.boccassi@gmail.com>	2018-08-14 18:52:30 +0100
committer	Luca Boccassi <luca.boccassi@gmail.com>	2018-08-14 18:53:17 +0100
commit		b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree		83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/net/dpaa
parent		ca33590b6af032bff57d9cc70455660466a654b2 (diff)

New upstream version 18.08 (upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/dpaa')
-rw-r--r--	drivers/net/dpaa/Makefile			3
-rw-r--r--	drivers/net/dpaa/dpaa_ethdev.c			213
-rw-r--r--	drivers/net/dpaa/dpaa_ethdev.h			24
-rw-r--r--	drivers/net/dpaa/dpaa_rxtx.c			41
-rw-r--r--	drivers/net/dpaa/meson.build			14
-rw-r--r--	drivers/net/dpaa/rte_pmd_dpaa.h			5
-rw-r--r--	drivers/net/dpaa/rte_pmd_dpaa_version.map	4
7 files changed, 214 insertions, 90 deletions
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 9c2a5ea8..d7a0a50c 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -27,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa_version.map
 
 LIBABIVER := 1
 
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
 # Interfaces with DPDK
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9b69ef45..7a950ac0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,14 +45,43 @@
 #include <fsl_bman.h>
 #include <fsl_fman.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
-/* At present we only allow up to 4 push mode queues - as each of this queue
- * need dedicated portal and we are short of portals.
+static int default_q;	/* use default queue - FMC is not executed*/
+/* At present we only allow up to 4 push mode queues as default - as each of
+ * this queue need dedicated portal and we are short of portals.
  */
-#define DPAA_MAX_PUSH_MODE_QUEUE       4
+#define DPAA_MAX_PUSH_MODE_QUEUE       8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
 
-static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
 static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
@@ -95,6 +124,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -122,9 +154,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
 		return -EINVAL;
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads &=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -134,13 +168,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	/* Rx offloads validation */
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
+
+	/* Tx offloads validation */
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
 		    DPAA_MAX_RX_PKT_LEN) {
 			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -256,14 +309,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
 				ETH_LINK_SPEED_10G);
-	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
-	dev_info->tx_offload_capa =
-		(DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
+	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -275,9 +326,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 
 	if (dpaa_intf->fif->mac_type == fman_mac_1g)
-		link->link_speed = 1000;
+		link->link_speed = ETH_SPEED_NUM_1G;
 	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
-		link->link_speed = 10000;
+		link->link_speed = ETH_SPEED_NUM_10G;
 	else
 		DPAA_PMD_ERR("invalid link_speed: %s, %d",
 			     dpaa_intf->name, dpaa_intf->fif->mac_type);
@@ -316,12 +367,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
 
-	if (xstats == NULL)
-		return 0;
-
 	if (n < num)
 		return num;
 
+	if (xstats == NULL)
+		return 0;
+
 	fman_if_stats_get_all(dpaa_intf->fif, values,
 			      sizeof(struct dpaa_if_stats) / 8);
@@ -335,10 +386,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 static int
 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 		      struct rte_eth_xstat_name *xstats_names,
-		      __rte_unused unsigned int limit)
+		      unsigned int limit)
 {
 	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 
+	if (limit < stat_cnt)
+		return stat_cnt;
+
 	if (xstats_names != NULL)
 		for (i = 0; i < stat_cnt; i++)
 			snprintf(xstats_names[i].name,
@@ -366,7 +420,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 			return 0;
 
 		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
-				      sizeof(struct dpaa_if_stats));
+				      sizeof(struct dpaa_if_stats) / 8);
 
 		for (i = 0; i < stat_cnt; i++)
 			values[i] =
@@ -463,7 +517,15 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
-	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+	if (queue_idx >= dev->data->nb_rx_queues) {
+		rte_errno = EOVERFLOW;
+		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
+		return -rte_errno;
+	}
+
+	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+			queue_idx, rxq->fqid);
 
 	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
 		struct fman_if_ic_params icp;
@@ -527,9 +589,11 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
 		}
 		ret = qman_init_fq(rxq, flags, &opts);
-		if (ret)
-			DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
-				     " ret: %d", rxq->fqid, ret);
+		if (ret) {
+			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+			return ret;
+		}
 		rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
 		rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
 		rxq->is_static = true;
@@ -553,7 +617,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
-int __rte_experimental
+int
 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id,
 		u16 ch_id,
@@ -604,8 +668,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 
 	ret = qman_init_fq(rxq, flags, &opts);
 	if (ret) {
-		DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
-			     rxq->fqid, ret);
+		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
 		return ret;
 	}
@@ -616,7 +680,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
 	return ret;
 }
 
-int __rte_experimental
+int
 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id)
 {
@@ -662,7 +726,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
-	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+	if (queue_idx >= dev->data->nb_tx_queues) {
+		rte_errno = EOVERFLOW;
+		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+		      (void *)dev, queue_idx, dev->data->nb_tx_queues);
+		return -rte_errno;
+	}
+
+	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+			queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
 	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
 	return 0;
 }
@@ -813,7 +885,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
 	fman_if_clear_mac_addr(dpaa_intf->fif, index);
 }
 
-static void
+static int
 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 		       struct ether_addr *addr)
 {
@@ -825,6 +897,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
 	ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
 	if (ret)
 		RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+
+	return ret;
 }
 
 static struct eth_dev_ops dpaa_devops = {
@@ -882,7 +956,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &rte_dpaa_pmd);
 }
 
-int __rte_experimental
+int
 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
 {
 	struct rte_eth_dev *dev;
@@ -953,15 +1027,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 
 	ret = qman_reserve_fqid(fqid);
 	if (ret) {
-		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
 			     fqid, ret);
 		return -EINVAL;
 	}
 
-	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
 	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
 	if (ret) {
-		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
 			     fqid, ret);
 		return ret;
 	}
@@ -977,7 +1051,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 				      &cgr_opts);
 		if (ret) {
 			DPAA_PMD_WARN(
-				"rx taildrop init fail on rx fqid %d (ret=%d)",
+				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
 				fqid, ret);
 			goto without_cgr;
 		}
@@ -988,7 +1062,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 without_cgr:
 	ret = qman_init_fq(fq, flags, &opts);
 	if (ret)
-		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
 	return ret;
 }
@@ -1016,10 +1090,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
 	/* no tx-confirmation */
 	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
 	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
-	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+	DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 	if (ret)
-		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+		DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
 	return ret;
 }
@@ -1090,25 +1164,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa_intf->cfg = cfg;
 
 	/* Initialize Rx FQ's */
-	if (getenv("DPAA_NUM_RX_QUEUES"))
-		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
-	else
+	if (default_q) {
 		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
-
-	/* if push mode queues to be enabled. Currenly we are allowing only
-	 * one queue per thread.
-	 */
-	if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
-		dpaa_push_mode_max_queue =
-				atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
-		if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
-			dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+	} else {
+		if (getenv("DPAA_NUM_RX_QUEUES"))
+			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+		else
+			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
 	}
 
-	/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
+
+	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
 	 * queues.
 	 */
-	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
 		DPAA_PMD_ERR("Invalid number of RX queues\n");
 		return -EINVAL;
 	}
@@ -1141,8 +1210,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	for (loop = 0; loop < num_rx_fqs; loop++) {
-		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
-			DPAA_PCD_FQID_MULTIPLIER + loop;
+		if (default_q)
+			fqid = cfg->rx_def;
+		else
+			fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+				DPAA_PCD_FQID_MULTIPLIER + loop;
 
 		if (dpaa_intf->cgr_rx)
 			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1317,6 +1389,9 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
 		if (!eth_dev)
 			return -ENOMEM;
+		eth_dev->device = &dpaa_dev->device;
+		eth_dev->dev_ops = &dpaa_devops;
+		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
 	}
@@ -1335,6 +1410,26 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 			return ret;
 		}
 
+		if (access("/tmp/fmc.bin", F_OK) == -1) {
+			RTE_LOG(INFO, PMD,
+				"* FMC not configured.Enabling default mode\n");
+			default_q = 1;
+		}
+
+		/* disabling the default push mode for LS1043 */
+		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+			dpaa_push_mode_max_queue = 0;
+
+		/* if push mode queues to be enabled. Currenly we are allowing
+		 * only one queue per thread.
+		 */
+		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+			dpaa_push_mode_max_queue =
+					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+				dpaa_push_mode_max_queue =
+						DPAA_MAX_PUSH_MODE_QUEUE;
+		}
+
 		is_global_init = 1;
 	}
@@ -1366,8 +1461,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 
 	/* Invoke PMD device initialization function */
 	diag = dpaa_dev_init(eth_dev);
-	if (diag == 0)
+	if (diag == 0) {
+		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
+	}
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		rte_free(eth_dev->data->dev_private);
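Under the offload model introduced above, the configure path only warns when the always-on ("nodis") offloads are not requested, so an application merely asks for what the PMD can actually toggle. The following is a minimal usage sketch, not part of the patch; the port id, queue counts, descriptor counts and the 9000-byte frame size are illustrative assumptions:

#include <rte_ethdev.h>

static int
configure_dpaa_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* JUMBO_FRAME is the one Rx offload this PMD can toggle; the
	 * checksum/scatter offloads listed as non-disablable are always
	 * on and only produce a warning if left unrequested.
	 */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
	conf.rxmode.max_rx_pkt_len = 9000;	/* <= DPAA_MAX_RX_PKT_LEN */

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 128,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id), NULL);
}

An out-of-range queue index now fails rte_eth_rx/tx_queue_setup() with rte_errno set to EOVERFLOW rather than being logged and accepted.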
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index c051ae32..c79b9f86 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -51,6 +51,10 @@
 /*Maximum number of slots available in TX ring*/
 #define DPAA_TX_BURST_SIZE	7
 
+/* Optimal burst size for RX and TX as default */
+#define DPAA_DEF_RX_BURST_SIZE	7
+#define DPAA_DEF_TX_BURST_SIZE	DPAA_TX_BURST_SIZE
+
 #ifndef VLAN_TAG_SIZE
 #define VLAN_TAG_SIZE   4 /** < Vlan Header Length */
 #endif
@@ -74,14 +78,10 @@
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
 #define DPAA_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP)
+	ETH_RSS_IP | \
+	ETH_RSS_UDP | \
+	ETH_RSS_TCP | \
+	ETH_RSS_SCTP)
 
 #define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
 	PKT_TX_IP_CKSUM | \
@@ -160,12 +160,14 @@ struct dpaa_if_stats {
 	uint64_t tund;		/**<Tx Undersized */
 };
 
-int __rte_experimental dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
-		int eth_rx_queue_id,
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+		int eth_rx_queue_id,
 		u16 ch_id,
 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
 
-int __rte_experimental dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
 		int eth_rx_queue_id);
 
 enum qman_cb_dqrr_result
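The collapsed DPAA_RSS_OFFLOAD_ALL definition leans on the ethdev group macros: as of DPDK 18.08, ETH_RSS_IP/UDP/TCP/SCTP together cover the eight individual frag/nonfrag flow types the header previously listed one by one (plus a few extra IPv6 variants). A hedged sketch of how an application would request the matching hash set; the use of the default RSS key is an assumption:

#include <rte_ethdev.h>

/* Sketch only: fill the RSS part of a port configuration using the same
 * group macros the driver header now uses.
 */
static void
rss_conf_sketch(struct rte_eth_conf *conf)
{
	conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf->rx_adv_conf.rss_conf.rss_key = NULL;	/* default key */
	conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
					    ETH_RSS_TCP | ETH_RSS_SCTP;
}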
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 0dea8e79..168b77e4 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -59,7 +59,7 @@
 	} while (0)
 
 #if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
-void dpaa_display_frame(const struct qm_fd *fd)
+static void dpaa_display_frame(const struct qm_fd *fd)
 {
 	int ii;
 	char *ptr;
@@ -90,11 +90,10 @@ static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
 	/*TBD:XXX: to be implemented*/
 }
 
-static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
-					uint64_t fd_virt_addr)
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
 {
 	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
-	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
 
 	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
@@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 		prev_seg = cur_seg;
 	}
 
-	dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+	dpaa_eth_packet_info(first_seg, vaddr);
 	rte_pktmbuf_free_seg(temp);
 
 	return first_seg;
@@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 	mbuf->ol_flags = 0;
 	mbuf->next = NULL;
 	rte_mbuf_refcnt_set(mbuf, 1);
-	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 
 	return mbuf;
 }
@@ -432,7 +431,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
 		}
 		fd = &dqrr[i]->fd;
-		dpaa_intf = fq[i]->dpaa_intf;
+		dpaa_intf = fq[0]->dpaa_intf;
 		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
 				DPAA_FD_FORMAT_SHIFT;
@@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
 		mbuf->ol_flags = 0;
 		mbuf->next = NULL;
 		rte_mbuf_refcnt_set(mbuf, 1);
-		dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 	}
 }
@@ -561,7 +560,8 @@ uint16_t dpaa_eth_queue_rx(void *q,
 	struct qman_fq *fq = q;
 	struct qm_dqrr_entry *dq;
 	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
-	int ret;
+	int num_rx_bufs, ret;
+	uint32_t vdqcr_flags = 0;
 
 	if (likely(fq->is_static))
 		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
@@ -574,8 +574,19 @@ uint16_t dpaa_eth_queue_rx(void *q,
 		}
 	}
 
-	ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
-				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+	/* Until request for four buffers, we provide exact number of buffers.
+	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
+	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
+	 * requested, so we request two less in this case.
+	 */
+	if (nb_bufs < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_bufs;
+	} else {
+		num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
 	if (ret)
 		return 0;
@@ -593,7 +604,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
 static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
 {
 	int ret;
-	uint64_t buf = 0;
+	size_t buf = 0;
 	struct bm_buffer bufs;
 
 	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
@@ -602,10 +613,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
 		return (void *)buf;
 	}
 
-	DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+	DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
 		    (uint64_t)bufs.addr, bufs.bpid);
 
-	buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+	buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
 				- bp_info->meta_data_size;
 	if (!buf)
 		goto out;
@@ -826,6 +837,8 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
 	}
 
 	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+		dpaa_unsegmented_checksum(mbuf, fd_arr);
 
 	return 0;
 }
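The dequeue-count arithmetic added to dpaa_eth_queue_rx() is easy to misread, so here it is restated as a standalone helper. This is a sketch for illustration only; QM_VDQCR_EXACT and DPAA_MAX_DEQUEUE_NUM_FRAMES are the driver's own symbols, stubbed below with illustrative values so the snippet compiles on its own:

#include <stdint.h>

/* Stubs for the driver symbols used below (values illustrative). */
#define QM_VDQCR_EXACT			(1u << 31)
#define DPAA_MAX_DEQUEUE_NUM_FRAMES	63

/* Mirrors the logic above: a volatile dequeue without QM_VDQCR_EXACT may
 * return up to two frames more than asked for, so bursts of four or more
 * under-request by two; tiny bursts use the exact-count flag instead.
 */
static inline int
vdq_request_size(uint16_t nb_bufs, uint32_t *vdqcr_flags)
{
	if (nb_bufs < 4) {
		*vdqcr_flags = QM_VDQCR_EXACT;	/* exact count, no overshoot */
		return nb_bufs;
	}
	*vdqcr_flags = 0;
	return nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
		(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
}

Either way the caller can never receive more than nb_bufs frames beyond what the bufs[] array was sized for, which is the invariant the rework preserves.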
diff --git a/drivers/net/dpaa/meson.build b/drivers/net/dpaa/meson.build
new file mode 100644
index 00000000..62dec7b0
--- /dev/null
+++ b/drivers/net/dpaa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+	build = false
+endif
+deps += ['mempool_dpaa']
+
+sources = files('dpaa_ethdev.c',
+		'dpaa_rxtx.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa.h')
diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h
index 38405ec0..37eea9b0 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa.h
+++ b/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -18,9 +18,6 @@
 #include <rte_ethdev_driver.h>
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Enable/Disable TX loopback
  *
  * @param port
@@ -33,7 +30,7 @@
  *    - (-ENODEV) if *port* invalid.
  *    - (-EINVAL) if bad parameter.
  */
-int __rte_experimental
+int
 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
 
 #endif /* _PMD_DPAA_H_ */
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index 3b937b10..8cb4500b 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -3,12 +3,10 @@ DPDK_17.11 {
 	local: *;
 };
 
-EXPERIMENTAL {
+DPDK_18.08 {
 	global:
 
 	dpaa_eth_eventq_attach;
 	dpaa_eth_eventq_detach;
 	rte_pmd_dpaa_set_tx_loopback;
-
-	local: *;
 } DPDK_17.11;
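With the __rte_experimental tags dropped and the symbols moved to the DPDK_18.08 version node, applications can call the PMD-specific loopback API without -DALLOW_EXPERIMENTAL_API. A minimal caller sketch; the port number and the printf-based error reporting are illustrative, and the return codes follow the header comment above:

#include <errno.h>
#include <stdio.h>
#include <rte_pmd_dpaa.h>

/* Sketch: toggle Tx loopback on a dpaa port from an application. */
static void
dpaa_loopback_example(uint8_t port_id, uint8_t enable)
{
	int ret = rte_pmd_dpaa_set_tx_loopback(port_id, enable);

	if (ret == -ENODEV)
		printf("port %u: not a dpaa device\n", port_id);
	else if (ret == -EINVAL)
		printf("port %u: bad parameter\n", port_id);
	else if (ret == 0)
		printf("port %u: loopback %s\n", port_id,
		       enable ? "enabled" : "disabled");
}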