path: root/drivers/net/thunderx/nicvf_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__

#include <rte_ethdev_driver.h>

#define THUNDERX_NICVF_PMD_VERSION      "2.0"
#define THUNDERX_REG_BYTES		8

#define NICVF_INTR_POLL_INTERVAL_MS	50
#define NICVF_HALF_DUPLEX		0x00
#define NICVF_FULL_DUPLEX		0x01
#define NICVF_UNKNOWN_DUPLEX		0xff

#define NICVF_RSS_OFFLOAD_PASS1 ( \
	ETH_RSS_PORT | \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP)

#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define NICVF_TX_OFFLOAD_CAPA ( \
	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
	DEV_TX_OFFLOAD_UDP_CKSUM        | \
	DEV_TX_OFFLOAD_TCP_CKSUM        | \
	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
	DEV_TX_OFFLOAD_MULTI_SEGS)

#define NICVF_RX_OFFLOAD_CAPA ( \
	DEV_RX_OFFLOAD_CHECKSUM    | \
	DEV_RX_OFFLOAD_VLAN_STRIP  | \
	DEV_RX_OFFLOAD_JUMBO_FRAME | \
	DEV_RX_OFFLOAD_SCATTER)

#define NICVF_DEFAULT_RX_FREE_THRESH    224
#define NICVF_DEFAULT_TX_FREE_THRESH    224
#define NICVF_TX_FREE_MPOOL_THRESH      16
#define NICVF_MAX_RX_FREE_THRESH        1024
#define NICVF_MAX_TX_FREE_THRESH        1024

#define VLAN_TAG_SIZE                   4	/* 802.3ac tag */

#define SKIP_DATA_BYTES "skip_data_bytes"	/* devargs key */

static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

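/*
 * Offset between the virtual and IOVA address of the mempool's first
 * memory chunk. The same offset applies to every mbuf in the pool only
 * when the pool occupies a single IOVA-contiguous region (e.g. one huge
 * page), as assumed by the phy2virt helpers below.
 */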
static inline uint64_t
nicvf_mempool_phy_offset(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *hdr;

	hdr = STAILQ_FIRST(&mp->mem_list);
	assert(hdr != NULL);
	return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}

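/*
 * Number of bytes between the start of the rte_mbuf structure and the
 * start of its data buffer (mbuf header plus private area).
 */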
static inline uint16_t
nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
{
	return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}

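/*
 * Map a queue index local to this Qset to the global ethdev queue index;
 * queues of a secondary Qset are placed after those of the primary Qset.
 */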
static inline uint16_t
nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
{
	uint16_t global_qidx = local_qidx;

	if (nic->sqs_mode)
		global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);

	return global_qidx;
}

/*
 * Simple phy2virt functions assuming mbufs are in a single huge page
 * V = P + offset
 * P = V - offset
 */
static inline uintptr_t
nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
	return (uintptr_t)(phy + mbuf_phys_off);
}

static inline uintptr_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
	return (rte_iova_t)(virt - mbuf_phys_off);
}
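
/*
 * Illustrative round trip (not part of the driver); "mp" stands for any
 * IOVA-contiguous mempool:
 *
 *	uint64_t off = nicvf_mempool_phy_offset(mp);
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *	rte_iova_t p = nicvf_mbuff_virt2phy((uintptr_t)m, off);
 *	struct rte_mbuf *back =
 *		(struct rte_mbuf *)nicvf_mbuff_phy2virt(p, off);
 *
 * Here "p" is the IOVA of the mbuf and "back" points to the same mbuf
 * as "m".
 */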

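/*
 * Range [*tx_start, *tx_end] of global Tx queue indices that fall within
 * this Qset, clamped to the number of configured Tx queues.
 */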
static inline void
nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
	       uint16_t *tx_end)
{
	uint16_t tmp;

	*tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_SND_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_SND_QUEUES_PER_QS) - 1;
	*tx_end = dev->data->nb_tx_queues ?
		RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
}

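/*
 * Range [*rx_start, *rx_end] of global Rx queue indices that fall within
 * this Qset, clamped to the number of configured Rx queues.
 */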
static inline void
nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
	       uint16_t *rx_end)
{
	uint16_t tmp;

	*rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_RCV_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_RCV_QUEUES_PER_QS) - 1;
	*rx_end = dev->data->nb_rx_queues ?
		RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
}

#endif /* __THUNDERX_NICVF_ETHDEV_H__  */