author    Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:15:11 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:45:54 +0000
commit    055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/sfc/sfc_tx.c
parent    f239aed5e674965691846e8ce3f187dd47523689 (diff)

New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/sfc/sfc_tx.c')
-rw-r--r--    drivers/net/sfc/sfc_tx.c    58
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index fc439cb6..127d59e6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -91,6 +91,21 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
         rc = EINVAL;
     }
 
+    if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
+        (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
+        sfc_err(sa, "multi-mempool is not supported by %s datapath",
+            sa->dp_tx->dp.name);
+        rc = EINVAL;
+    }
+
+    if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
+        (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
+        sfc_err(sa,
+            "mbuf reference counters are neglected by %s datapath",
+            sa->dp_tx->dp.name);
+        rc = EINVAL;
+    }
+
     if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
         if (!encp->enc_hw_tx_insert_vlan_enabled) {
             sfc_err(sa, "VLAN offload is not supported");
@@ -750,7 +765,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         size_t seg_len;
 
         seg_len = m_seg->data_len;
-        next_frag = rte_mbuf_data_dma_addr(m_seg);
+        next_frag = rte_mbuf_data_iova(m_seg);
 
         /*
          * If we've started TSO transaction few steps earlier,
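The one-line change here tracks the DPDK 17.11 rename of the mbuf physical/DMA address helpers to IOVA helpers; rte_mbuf_data_iova() returns the bus address of the segment's data start. An illustrative sketch (not driver code) of walking a multi-segment mbuf the same way a Tx path does when filling DMA descriptors:

#include <rte_common.h>
#include <rte_mbuf.h>

/*
 * Illustrative only: fetch the IOVA and length of every segment of a
 * possibly chained mbuf.
 */
static void
walk_segments(const struct rte_mbuf *m)
{
    const struct rte_mbuf *seg;

    for (seg = m; seg != NULL; seg = seg->next) {
        rte_iova_t frag_addr = rte_mbuf_data_iova(seg);
        uint16_t frag_len = seg->data_len;

        RTE_SET_USED(frag_addr);    /* a PMD would program these */
        RTE_SET_USED(frag_len);     /* into a transmit descriptor */
    }
}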
@@ -977,6 +992,44 @@ sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
     txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
 }
 
+static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
+static int
+sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
+{
+    struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+    if (unlikely(offset > txq->ptr_mask))
+        return -EINVAL;
+
+    if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1)))
+        return RTE_ETH_TX_DESC_UNAVAIL;
+
+    /*
+     * Poll EvQ to derive up-to-date 'txq->pending' figure;
+     * it is required for the queue to be running, but the
+     * check is omitted because API design assumes that it
+     * is the duty of the caller to satisfy all conditions
+     */
+    SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
+               SFC_EFX_TXQ_FLAG_RUNNING);
+    sfc_ev_qpoll(txq->evq);
+
+    /*
+     * Ring tail is 'txq->pending', and although descriptors
+     * between 'txq->completed' and 'txq->pending' are still
+     * in use by the driver, they should be reported as DONE
+     */
+    if (unlikely(offset < (txq->added - txq->pending)))
+        return RTE_ETH_TX_DESC_FULL;
+
+    /*
+     * There is no separate return value for unused descriptors;
+     * the latter will be reported as DONE because genuine DONE
+     * descriptors will be freed anyway in SW on the next burst
+     */
+    return RTE_ETH_TX_DESC_DONE;
+}
+
 struct sfc_dp_tx sfc_efx_tx = {
     .dp = {
         .name = SFC_KVARG_DATAPATH_EFX,
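The new sfc_efx_tx_qdesc_status() callback is what the generic rte_eth_tx_descriptor_status() ethdev call ends up invoking for the EFX datapath; the offset is counted from the queue tail, and descriptors already reaped or never used report as DONE. A hedged application-side sketch (port and queue ids are placeholders) of using that status to decide whether a Tx queue still has headroom:

#include <rte_ethdev.h>

/*
 * Hypothetical helper: report whether the descriptor that would carry the
 * 'lookahead'-th next packet on (port_id, queue_id) is already free.
 * Negative return values (-ENOTSUP, -ENODEV, -EINVAL) are treated as "no".
 */
static int
txq_has_room(uint16_t port_id, uint16_t queue_id, uint16_t lookahead)
{
    int status = rte_eth_tx_descriptor_status(port_id, queue_id, lookahead);

    return status == RTE_ETH_TX_DESC_DONE;
}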
@@ -985,11 +1038,14 @@ struct sfc_dp_tx sfc_efx_tx = {
     },
     .features = SFC_DP_TX_FEAT_VLAN_INSERT |
                 SFC_DP_TX_FEAT_TSO |
+                SFC_DP_TX_FEAT_MULTI_POOL |
+                SFC_DP_TX_FEAT_REFCNT |
                 SFC_DP_TX_FEAT_MULTI_SEG,
     .qcreate = sfc_efx_tx_qcreate,
     .qdestroy = sfc_efx_tx_qdestroy,
     .qstart = sfc_efx_tx_qstart,
     .qstop = sfc_efx_tx_qstop,
     .qreap = sfc_efx_tx_qreap,
+    .qdesc_status = sfc_efx_tx_qdesc_status,
     .pkt_burst = sfc_efx_xmit_pkts,
 };