Diffstat (limited to 'drivers/net/sfc/sfc_ef10_tx.c')
-rw-r--r--  drivers/net/sfc/sfc_ef10_tx.c  81
1 file changed, 69 insertions(+), 12 deletions(-)
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index da2b5edb..0454e79a 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -158,17 +158,35 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
do {
struct sfc_ef10_tx_sw_desc *txd;
+ struct rte_mbuf *m;
txd = &txq->sw_ring[completed & ptr_mask];
+ if (txd->mbuf == NULL)
+ continue;
- if (txd->mbuf != NULL) {
- rte_pktmbuf_free(txd->mbuf);
- txd->mbuf = NULL;
+ m = rte_pktmbuf_prefree_seg(txd->mbuf);
+ txd->mbuf = NULL;
+ if (m == NULL)
+ continue;
+
+ if ((nb == RTE_DIM(bulk)) ||
+ ((nb != 0) && (m->pool != bulk[0]->pool))) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
}
+
+ bulk[nb++] = m;
} while (++completed != pending);
+ if (nb != 0)
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
txq->completed = completed;
}
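
[Editor's note] The reap loop above replaces per-segment rte_pktmbuf_free() calls with batched returns to the mempool. The same pattern, lifted out of the driver as a minimal standalone sketch (free_completed() and REAP_BULK_SIZE are illustrative names, not driver code):

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define REAP_BULK_SIZE 32 /* stand-in for SFC_TX_REAP_BULK_SIZE */

/* Return n transmitted mbuf segments to their pools, flushing the
 * batch whenever it fills up or the originating pool changes. */
static void
free_completed(struct rte_mbuf **segs, unsigned int n)
{
	struct rte_mbuf *bulk[REAP_BULK_SIZE];
	unsigned int nb = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* NULL means another reference to the segment remains */
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(segs[i]);

		if (m == NULL)
			continue;

		if (nb == RTE_DIM(bulk) ||
		    (nb != 0 && m->pool != bulk[0]->pool)) {
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
			nb = 0;
		}
		bulk[nb++] = m;
	}
	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
}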
@@ -177,7 +195,7 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
}
static void
-sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
efx_qword_t *edp)
{
EFX_POPULATE_QWORD_4(*edp,
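
[Editor's note] The phys_addr_t/rte_iova_t and rte_mbuf_data_dma_addr()/rte_mbuf_data_iova() renames in this and later hunks track DPDK 17.11's IOVA-centric API cleanup; behaviour is unchanged. Paraphrasing the relevant definitions from rte_memory.h and rte_mbuf.h:

typedef uint64_t rte_iova_t;	/* was: phys_addr_t */

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)	/* was: rte_mbuf_data_dma_addr() */
{
	return mb->buf_iova + mb->data_off;
}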
@@ -323,8 +341,9 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_len = m_seg->pkt_len;
do {
- phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id = added & ptr_mask;
SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
@@ -332,15 +351,30 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sfc_ef10_tx_qdesc_dma_create(seg_addr,
seg_len, (pkt_len == 0),
- &txq->txq_hw_ring[added & ptr_mask]);
+ &txq->txq_hw_ring[id]);
+
+ /*
+ * rte_pktmbuf_free() is commonly used in DPDK to recycle
+ * packets: it checks every segment's reference counter and
+ * returns the buffer to its pool whenever possible.
+ * Nevertheless, freeing mbuf segments one by one may entail
+ * some performance decline. To avoid this, sfc_ef10_tx_reap()
+ * does the same job on its own and frees the buffers in bulk
+ * (all mbufs within a bulk belong to the same pool).
+ * For that to work, each segment pointer must be associated
+ * with its own SW descriptor, so that a single loop on reap
+ * is sufficient to inspect all the buffers.
+ */
+ txq->sw_ring[id].mbuf = m_seg;
+
++added;
} while ((m_seg = m_seg->next) != 0);
dma_desc_space -= (added - pkt_start);
-
- /* Assign mbuf to the last used desc */
- txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
}
if (likely(added != txq->added)) {
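
[Editor's note] With this hunk each segment of a multi-segment packet, not just the head of the chain, is recorded in its own sw_ring slot (the old "assign mbuf to the last used desc" step is dropped). A hedged sketch of the fill pattern, reusing the driver's own types but omitting the ring-space accounting; enqueue_chain() is an illustrative helper and the EOP test is simplified to "last segment":

static void
enqueue_chain(struct rte_mbuf *m, efx_qword_t *hw_ring,
	      struct sfc_ef10_tx_sw_desc *sw_ring,
	      unsigned int *added, unsigned int ptr_mask)
{
	struct rte_mbuf *m_seg;

	for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
		unsigned int id = (*added)++ & ptr_mask;

		/* One HW descriptor per segment... */
		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(m_seg),
					     rte_pktmbuf_data_len(m_seg),
					     m_seg->next == NULL, /* EOP */
					     &hw_ring[id]);
		/* ...and the matching SW slot holds that very segment,
		 * so one pass over completed slots frees everything. */
		sw_ring[id].mbuf = m_seg;
	}
}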
@@ -367,14 +401,25 @@ sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
do {
struct sfc_ef10_tx_sw_desc *txd;
txd = &txq->sw_ring[completed & ptr_mask];
- rte_pktmbuf_free_seg(txd->mbuf);
+ if (nb == RTE_DIM(bulk)) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
+ }
+
+ bulk[nb++] = txd->mbuf;
} while (++completed != pending);
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
txq->completed = completed;
}
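
[Editor's note] The simple datapath's reap can stay leaner than the one above: it advertises neither SFC_DP_TX_FEAT_REFCNT nor SFC_DP_TX_FEAT_MULTI_POOL (its feature list is not touched by this patch), so every completed slot is assumed to hold a refcnt-1 mbuf from a single pool. That is why there is no rte_pktmbuf_prefree_seg() call, no pool comparison, and no nb != 0 guard on the trailing rte_mempool_put_bulk(): the do/while body runs at least once and always stores an mbuf. The invariant could be spelled out as (illustrative, not in the patch):

SFC_ASSERT(txd->mbuf != NULL);
SFC_ASSERT(rte_mbuf_refcnt_read(txd->mbuf) == 1);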
@@ -419,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
SFC_EF10_TX_DMA_DESC_LEN_MAX);
- sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
rte_pktmbuf_data_len(pkt),
true, &txq->txq_hw_ring[id]);
@@ -557,7 +602,7 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
txd = &txq->sw_ring[completed & txq->ptr_mask];
if (txd->mbuf != NULL) {
- rte_pktmbuf_free(txd->mbuf);
+ rte_pktmbuf_free_seg(txd->mbuf);
txd->mbuf = NULL;
}
}
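
[Editor's note] The switch from rte_pktmbuf_free() to rte_pktmbuf_free_seg() in qreap follows from the per-segment association above: each sw_ring entry now points at one segment, so freeing the whole chain from every entry would release shared segments multiple times. For reference, the two calls differ as follows:

/* Frees 'm' and every segment chained through m->next */
rte_pktmbuf_free(m);

/* Frees only the single segment 'm'; chained segments are untouched */
rte_pktmbuf_free_seg(m);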
@@ -565,6 +610,14 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
txq->flags &= ~SFC_EF10_TXQ_STARTED;
}
+static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
+static int
+sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
struct sfc_dp_tx sfc_ef10_tx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EF10,
@@ -572,6 +625,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_TX_FEAT_MULTI_SEG |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
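
[Editor's note] SFC_DP_TX_FEAT_MULTI_POOL and SFC_DP_TX_FEAT_REFCNT advertise to the common sfc code that this datapath now copes with mbufs drawn from different mempools and with reference counts above one, which is exactly what the prefree/bulk-free reap logic above enables.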
@@ -579,6 +634,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
.qtx_ev = sfc_ef10_tx_qtx_ev,
.qstop = sfc_ef10_tx_qstop,
.qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
.pkt_burst = sfc_ef10_xmit_pkts,
};
@@ -594,5 +650,6 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.qtx_ev = sfc_ef10_tx_qtx_ev,
.qstop = sfc_ef10_tx_qstop,
.qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
.pkt_burst = sfc_ef10_simple_xmit_pkts,
};
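
[Editor's note] The qdesc_status callback registered for both datapaths is a stub reporting -ENOTSUP. Applications reach it through the generic ethdev query; a hedged consumer sketch (tx_desc_done() is an illustrative helper, not part of DPDK):

#include <rte_ethdev.h>

/* Returns 1 if the TX descriptor 'offset' entries ahead of the queue
 * tail has been completed, 0 if still in flight, -1 if the query is
 * not supported (as with this EF10 datapath for now). */
static int
tx_desc_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int rc = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	if (rc < 0)
		return -1; /* e.g. -ENOTSUP */
	return rc == RTE_ETH_TX_DESC_DONE;
}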