Diffstat (limited to 'drivers/net/thunderx/nicvf_rxtx.c')
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c | 46
 1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fc43b747..6cae8341 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -430,9 +430,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union cq_entry_t *desc = rxq->desc;
const uint64_t cqe_mask = rxq->qlen_mask;
uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
uint32_t cqe_head = rxq->head & cqe_mask;
int32_t available_space = rxq->available_space;
- uint8_t port_id = rxq->port_id;
const uint8_t rbptr_offset = rxq->rbptr_offset;
to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
@@ -448,17 +448,12 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-
pkt->ol_flags = 0;
- pkt->port = port_id;
pkt->data_len = cqe_rx_w3.rb0_sz;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = 1;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
-
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
- rte_mbuf_refcnt_set(pkt, 1);
rx_pkts[i] = pkt;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(pkt);
@@ -469,11 +464,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
@@ -481,8 +475,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
- uint64_t mbuf_phys_off, uint8_t port_id,
- struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -501,12 +496,10 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
pkt->ol_flags = 0;
- pkt->port = port_id;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = nb_segs;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
- rte_mbuf_refcnt_set(pkt, 1);
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
@@ -518,9 +511,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
prev->next = seg;
seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
- seg->port = port_id;
- seg->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(seg, 1);
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
prev = seg;
}
@@ -541,7 +532,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t i, to_process, cqe_head, buffers_consumed = 0;
int32_t available_space = rxq->available_space;
uint16_t nb_segs;
- const uint8_t port_id = rxq->port_id;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
const uint8_t rbptr_offset = rxq->rbptr_offset;
cqe_head = rxq->head & cqe_mask;
@@ -552,7 +543,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- port_id, rx_pkts + i, rbptr_offset);
+ rx_pkts + i, rbptr_offset, mbuf_init);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -563,11 +554,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
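
The + lines above replace four per-field stores (port, data_off, nb_segs, refcnt) with a single 64-bit store of a per-queue template, rxq->mbuf_initializer.value, over the mbuf's rearm_data area; the other change hoists the RBDR refill out of the to_process block, so buffers above rx_free_thresh are handed back to hardware even on a poll that completed nothing. Below is a minimal sketch of the template-mbuf idiom for reference; the union layout and the helper names (mbuf_template_build, mbuf_init_update) and bodies are illustrative assumptions in the spirit of the usual DPDK pattern, not code taken from this tree.

#include <stdint.h>
#include <string.h>
#include <rte_mbuf.h>

/*
 * Hypothetical view of the 64-bit template word. The field order must
 * match the rearm_data area of struct rte_mbuf on the target.
 */
union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

/* Built once at RX queue setup; the result would be kept in
 * rxq->mbuf_initializer.value. */
static uint64_t
mbuf_template_build(uint16_t port_id)
{
	struct rte_mbuf mb_def;

	memset(&mb_def, 0, sizeof(mb_def));
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* rearm_data aliases the four fields written above */
	return *(uint64_t *)((uintptr_t)&mb_def.rearm_data);
}

/* Applied per packet in the hot path: one store instead of four,
 * with the CQE's align_pad folded into data_off. */
static inline void
mbuf_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
		 uint16_t apad)
{
	union mbuf_initializer init = { .value = mbuf_init };

	init.fields.data_off += apad;
	*(uint64_t *)(&pkt->rearm_data) = init.value;
}

The multi-segment variant used in the diff, nicvf_mbuff_init_mseg_update(), would only differ in also patching nb_segs before the store; trailing segments are then initialized with apad = 0, as the seg loop shows.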