path: root/drivers/net/virtio/virtio_rxtx_simple.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef _VIRTIO_RXTX_SIMPLE_H_
#define _VIRTIO_RXTX_SIMPLE_H_

#include <stdint.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

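/* Batch size for the vectorized (simple) RX path: one rearm refills
 * exactly one RX burst's worth of descriptors.
 */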
#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST

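/*
 * Refill the RX ring with RTE_VIRTIO_VPMD_RX_REARM_THRESH mbufs from the
 * queue's mempool, starting at the current avail index, and expose the
 * new buffers to the device.
 */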
static inline void
virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
	int i;
	uint16_t desc_idx;
	struct rte_mbuf **sw_ring;
	struct vring_desc *start_dp;
	int ret;
	struct virtqueue *vq = rxvq->vq;

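	/* vq_nentries is a power of two, so the mask wraps the avail index. */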
	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	sw_ring = &vq->sw_ring[desc_idx];
	start_dp = &vq->vq_ring.desc[desc_idx];

	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
		RTE_VIRTIO_VPMD_RX_REARM_THRESH);
	if (unlikely(ret)) {
		rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		return;
	}

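	/*
	 * Initialize each mbuf header with the cached 64-bit rearm template,
	 * then point the descriptor at the mbuf, keeping room in the headroom
	 * for the virtio-net header just before the packet data.
	 */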
	for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
		uintptr_t p;

		p = (uintptr_t)&sw_ring[i]->rearm_data;
		*(uint64_t *)p = rxvq->mbuf_initializer;

		start_dp[i].addr =
			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}

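	/* Account for the new buffers and publish the updated avail index. */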
	vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq_update_avail_idx(vq);
}

#define VIRTIO_TX_FREE_THRESH 32
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
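/*
 * Bulk-free VIRTIO_TX_FREE_NR transmitted mbufs. The simple TX path uses
 * two descriptors per packet (virtio-net header + data), so only half of
 * the ring slots carry mbufs: hence the (vq_nentries >> 1) index mask and
 * the (VIRTIO_TX_FREE_NR << 1) free-count adjustment below.
 */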
static inline void
virtio_xmit_cleanup_simple(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
	uint32_t nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];

	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
		   ((vq->vq_nentries >> 1) - 1));
	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
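		/*
		 * Batch mbufs belonging to the same mempool and return each
		 * batch with a single bulk put.
		 */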
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = rte_pktmbuf_prefree_seg(m);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
						(void **)free,
						RTE_MIN(RTE_DIM(free),
							nb_free));
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free,
			RTE_MIN(RTE_DIM(free), nb_free));
	} else {
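		/* No pool to batch against; free the remaining mbufs one by one. */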
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = rte_pktmbuf_prefree_seg(m);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

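	/* Two descriptors are reclaimed per freed packet, hence the shift. */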
	vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
	vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
}

#endif /* _VIRTIO_RXTX_SIMPLE_H_ */