Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_rxtx.c')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 148
1 file changed, 112 insertions(+), 36 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 93db10fb..e865c675 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -69,6 +69,7 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_net.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
@@ -76,6 +77,14 @@
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
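The two masks added above drive the new Tx prepare check: VMXNET3_TX_OFFLOAD_MASK lists the only Tx offload request flags the device honors (VLAN insertion, L4 checksum, TSO), and XOR-ing it with PKT_TX_OFFLOAD_MASK yields every request bit vmxnet3 cannot honor. Because the supported set is a subset of PKT_TX_OFFLOAD_MASK, the XOR behaves like AND-NOT. A minimal sketch of the arithmetic, using illustrative flag values rather than the real DPDK encodings:

    #define ALL_OFFLOADS 0x3fULL                       /* stand-in for PKT_TX_OFFLOAD_MASK */
    #define SUPPORTED    (0x01ULL | 0x06ULL | 0x08ULL) /* VLAN | L4 mask | TSO */
    #define NOTSUP       (ALL_OFFLOADS ^ SUPPORTED)    /* == ALL_OFFLOADS & ~SUPPORTED == 0x30 */

    /* A packet requesting, say, an offload encoded as 0x10 makes
     * (ol_flags & NOTSUP) != 0 and is rejected in vmxnet3_prep_pkts(). */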
@@ -192,6 +201,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
}
}
@@ -209,6 +220,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
}
}
@@ -235,7 +249,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += tq->txdata_desc_size * data_ring->size;
memset(ring->base, 0, size);
}
@@ -245,8 +259,10 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
{
int i;
vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_hw *hw = rq->hw;
struct vmxnet3_cmd_ring *ring0, *ring1;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
if (rq != NULL) {
@@ -271,6 +287,8 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
+ size += rq->data_desc_size * data_ring->size;
memset(ring0->base, 0, size);
}
@@ -350,6 +368,53 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -415,10 +480,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (txm->nb_segs == 1 &&
- rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
- tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)
+ ((uint8 *)txq->data_ring.base +
+ txq->cmd_ring.next2fill *
+ txq->txdata_desc_size);
copy_size = rte_pktmbuf_pkt_len(txm);
rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
}
@@ -435,12 +503,15 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- if (copy_size)
- gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
- else
+ if (copy_size) {
+ uint64 offset = txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
+ gdesc->txd.addr =
+ rte_cpu_to_le_64(txq->data_ring.basePA +
+ offset);
+ } else {
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ }
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;
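The two hunks above generalize the small-packet copy path. vmxnet3 version 3 makes the Tx data descriptor size configurable, so both the copy threshold and the descriptor stride become txq->txdata_desc_size instead of the fixed sizeof(struct Vmxnet3_TxDataDesc). The byte-offset math, sketched with names mirroring the patch:

    uint32_t n   = txq->cmd_ring.next2fill;
    uint64_t off = (uint64_t)n * txq->txdata_desc_size;
    void *tdd    = (uint8 *)txq->data_ring.base + off; /* CPU-side copy target */
    uint64_t pa  = txq->data_ring.basePA + off;        /* programmed into gdesc->txd.addr */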
@@ -696,7 +767,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
idx = rcd->rxdIdx;
- ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
@@ -762,6 +833,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
+ if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
+ uint8_t *rdd = rxq->data_ring.base +
+ idx * rxq->data_desc_size;
+
+ RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
+ rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
+ rdd, rcd->len);
+ }
+
rxq->start_seg = rxm;
vmxnet3_rx_offload(rcd, rxm);
} else {
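With vmxnet3 v3 the device can also complete small received packets through a per-queue Rx data ring, so the fixed qid1 comparison gives way to helpers that decode rcd->rqID, and data-ring completions are satisfied by copying the payload out of the data-ring slot into the mbuf. The helpers are defined in headers touched elsewhere in this series; given the qid assignments made later in this patch (qid1 = q, qid2 = q + num_rx_queues, data_ring_qid = q + 2 * num_rx_queues), a plausible sketch of their logic is:

    /* Sketch only; the real definitions live outside this file. */
    static inline uint32_t
    vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
    {
        return (rqID >= hw->num_rx_queues &&
                rqID < 2 * hw->num_rx_queues) ? 1 : 0; /* middle ID range is cmd_ring[1] */
    }

    static inline int
    vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
    {
        return rqID >= 2 * hw->num_rx_queues;          /* third ID range is the data ring */
    }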
@@ -816,30 +896,6 @@ rcd_done:
return nb_rx;
}
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -876,6 +932,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
+ txq->txdata_desc_size = hw->txdata_desc_size;
ring = &txq->cmd_ring;
comp_ring = &txq->comp_ring;
@@ -905,13 +962,15 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += txq->txdata_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ txq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring initialization */
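The private ring_dma_zone_reserve() removed above is replaced with rte_eth_dma_zone_reserve(), the generic ethdev helper that performs the same lookup-then-reserve sequence but takes the alignment as a parameter. Because the queue now remembers its zone in txq->mz (and rxq->mz below), the release paths added earlier can return the memory with rte_memzone_free() instead of leaking it. The pairing, sketched:

    /* queue setup */
    mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
                                  VMXNET3_RING_BA_ALIGN, socket_id);
    txq->mz = mz;

    /* queue release */
    rte_memzone_free(txq->mz);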
@@ -955,6 +1014,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring;
int size;
uint8_t i;
char mem_name[32];
@@ -975,11 +1035,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
+ rxq->data_desc_size = hw->rxdata_desc_size;
rxq->stopped = TRUE;
ring0 = &rxq->cmd_ring[0];
ring1 = &rxq->cmd_ring[1];
comp_ring = &rxq->comp_ring;
+ data_ring = &rxq->data_ring;
/* Rx vmxnet rings length should be between 256-4096 */
if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
@@ -995,6 +1058,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
comp_ring->size = ring0->size + ring1->size;
+ data_ring->size = ring0->size;
/* Rx vmxnet rings structure initialization */
ring0->next2fill = 0;
@@ -1008,12 +1072,16 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
+ size += rxq->data_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ rxq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring0 initialization */
@@ -1029,6 +1097,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
ring1->size;
+ /* data_ring initialization */
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
+ data_ring->base =
+ (uint8_t *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ }
+
/* cmd_ring0-cmd_ring1 buf_info allocation */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
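For reference, after this patch the single per-queue Rx memzone is carved up in order, with data_ring->size pinned to ring0->size as set above:

    /* mz->addr
     *  +- ring0 descriptors : ring0->size * sizeof(struct Vmxnet3_RxDesc)
     *  +- ring1 descriptors : ring1->size * sizeof(struct Vmxnet3_RxDesc)
     *  +- completion ring   : comp_ring->size * sizeof(struct Vmxnet3_RxCompDesc)
     *  +- data ring         : data_ring->size * rxq->data_desc_size
     *                         (only when VMXNET3_VERSION_GE_3(hw) && data_desc_size != 0)
     */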