/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include "common.h"
#include "l2t.h"

/**
 * cxgbe_l2t_release - Release associated L2T entry
 * @e: L2T entry to release
 *
 * Drops one reference to the given L2T entry. Once the reference count
 * reaches zero, the entry becomes free for reuse in the L2T table.
 */
void cxgbe_l2t_release(struct l2t_entry *e)
{
	if (rte_atomic32_read(&e->refcnt) != 0)
		rte_atomic32_dec(&e->refcnt);
}
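
/*
 * Example (illustrative sketch, not part of this driver): a caller that
 * took a reference via cxgbe_l2t_alloc_switching() drops it on teardown.
 * The filter_entry struct and its l2t field are assumptions for
 * illustration only.
 *
 *	static void filter_teardown(struct filter_entry *f)
 *	{
 *		if (f->l2t) {
 *			cxgbe_l2t_release(f->l2t);
 *			f->l2t = NULL;
 *		}
 *	}
 */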

/**
 * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really
 * the L2T index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	struct l2t_data *d = adap->l2t;
	unsigned int tid = GET_TID(rpl);
	unsigned int l2t_idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, l2t_idx);
		return;
	}

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

		t4_os_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING)
			e->state = L2T_STATE_VALID;
		t4_os_unlock(&e->lock);
	}
}
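
/*
 * Illustrative sketch of the TID encoding handled above (assumed bit
 * layout, for clarity only): the hardware echoes back the value built by
 * write_l2e() in MK_OPCODE_TID(), so the reply TID carries the L2T index
 * in its low bits plus the F_SYNC_WR flag when a reply was requested:
 *
 *	tid = l2t_idx | V_SYNC_WR(sync);   built by write_l2e()
 *	l2t_idx = tid % L2T_SIZE;          strips flag bits above the index
 *	sync = !!(tid & F_SYNC_WR);        whether a sync write completed
 */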

/**
 * Write an L2T entry. Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
		     bool loopback, bool arpmiss)
{
	struct adapter *adap = ethdev2adap(dev);
	struct l2t_data *d = adap->l2t;
	struct rte_mbuf *mbuf;
	struct cpl_l2t_write_req *req;
	struct sge_ctrl_txq *ctrlq;
	unsigned int l2t_idx = e->idx + d->l2t_start;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;

	ctrlq = &adap->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*req);
	mbuf->pkt_len = mbuf->data_len;

	req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *);
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) =
		cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					  l2t_idx | V_SYNC_WR(sync) |
					  V_TID_QID(adap->sge.fw_evtq.abs_id)));
	req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) |
				  V_L2T_W_LPBK(loopback) |
				  V_L2T_W_ARPMISS(arpmiss) |
				  V_L2T_W_NOREPLY(!sync));
	req->l2t_idx = cpu_to_be16(l2t_idx);
	req->vlan = cpu_to_be16(e->vlan);
	rte_memcpy(req->dst_mac, e->dmac, ETHER_ADDR_LEN);
	if (loopback)
		memset(req->dst_mac, 0, ETHER_ADDR_LEN);

	t4_mgmt_tx(ctrlq, mbuf);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return 0;
}
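
/*
 * Note on the sync flow above: when sync is set, the entry is parked in
 * L2T_STATE_SYNC_WRITE until the firmware's CPL_L2T_WRITE_RPL arrives and
 * do_l2t_write_rpl() moves it to L2T_STATE_VALID. When sync is clear,
 * V_L2T_W_NOREPLY(1) suppresses the reply and the state is left unchanged.
 */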

/**
 * find_or_alloc_l2e - Find/Allocate a free L2T entry
 * @d: L2T table
 * @vlan: VLAN id to compare/add
 * @port: port id to compare/add
 * @dmac: Destination MAC address to compare/add
 * Returns pointer to the L2T entry found/created
 *
 * Finds/Allocates an L2T entry to be used by the switching rule of a filter.
 */
static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
					   u8 port, u8 *dmac)
{
	struct l2t_entry *end, *e;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (rte_atomic32_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == L2T_STATE_SWITCHING) {
				if ((!memcmp(e->dmac, dmac, ETHER_ADDR_LEN)) &&
				    e->vlan == vlan && e->lport == port)
					goto exists;
			}
		}
	}

	if (first_free) {
		e = first_free;
		goto found;
	}

	return NULL;

found:
	e->state = L2T_STATE_UNUSED;

exists:
	return e;
}
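
/*
 * Note on the scan above: it remembers the first unreferenced slot but
 * keeps looking for a live switching entry with the same (dmac, vlan,
 * port) tuple, so duplicates share an existing entry instead of consuming
 * a fresh slot.
 */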

static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
						u16 vlan, u8 port,
						u8 *eth_addr)
{
	struct adapter *adap = ethdev2adap(dev);
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;
	int ret = 0;

	t4_os_write_lock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		t4_os_lock(&e->lock);
		if (!rte_atomic32_read(&e->refcnt)) {
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			rte_memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			rte_atomic32_set(&e->refcnt, 1);
			ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
			if (ret < 0)
				dev_debug(adap, "Failed to write L2T entry: %d",
					  ret);
		} else {
			rte_atomic32_inc(&e->refcnt);
		}
		t4_os_unlock(&e->lock);
	}
	t4_os_write_unlock(&d->lock);

	return ret ? NULL : e;
}

/**
 * cxgbe_l2t_alloc_switching - Allocate an L2T entry for a switching rule
 * @dev: rte_eth_dev pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 * Returns pointer to the allocated L2T entry
 *
 * Allocates an L2T entry for use by the switching rule of a filter.
 */
struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
					    u8 port, u8 *dmac)
{
	return t4_l2t_alloc_switching(dev, vlan, port, dmac);
}
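
/*
 * Example (illustrative sketch, not part of this driver): a filter with a
 * switching action would allocate its L2T entry during setup. The
 * filter_entry struct and its fs.vlan/fs.eport/fs.dmac fields are
 * assumptions for illustration only.
 *
 *	f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan, f->fs.eport,
 *					   f->fs.dmac);
 *	if (!f->l2t)
 *		return -ENOMEM;
 */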

/**
 * Initialize L2 Table
 */
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
	unsigned int l2t_size;
	unsigned int i;
	struct l2t_data *d;

	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
		return NULL;

	l2t_size = l2t_end - l2t_start + 1;

	d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
	if (!d)
		return NULL;

	d->l2t_start = l2t_start;
	d->l2t_size = l2t_size;

	t4_os_rwlock_init(&d->lock);

	for (i = 0; i < d->l2t_size; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		t4_os_lock_init(&d->l2tab[i].lock);
		rte_atomic32_set(&d->l2tab[i].refcnt, 0);
	}

	return d;
}
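
/*
 * Example (illustrative sketch, not part of this driver): adapter bring-up
 * would size the table from the L2T region advertised by firmware. The
 * l2t_start/l2t_end fields on struct adapter are assumptions for
 * illustration only.
 *
 *	adap->l2t = t4_init_l2t(adap->l2t_start, adap->l2t_end);
 *	if (!adap->l2t)
 *		dev_err(adap, "could not allocate L2T\n");
 */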

/**
 * Cleanup L2 Table
 */
void t4_cleanup_l2t(struct adapter *adap)
{
	if (adap->l2t)
		t4_os_free(adap->l2t);
}