path: root/tests/perf/Long_IPv4_Fib_2M_Intel-X520-DA2.robot
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/performance.robot
| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | PERFTEST_LONG
| ...        | FIB_2M | NIC_Intel-X520-DA2
| Suite Setup | 3-node Performance Suite Setup with DUT's NIC model
| ... | L3 | Intel-X520-DA2
| Suite Teardown | 3-node Performance Suite Teardown
| Test Setup | Setup all DUTs before test
| Test Teardown | Run Keyword | Remove startup configuration of VPP from all DUTs
| Documentation | *RFC2544: Pkt throughput IPv4 routing test cases*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for IPv4 routing.
| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with IPv4
| ... | routing and 2x1M static IPv4 /32 route entries. DUT1 and DUT2 tested
| ... | with 2p10GE NIC X520 Niantic by Intel.
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop
| ... | Rate) with zero packet loss tolerance or throughput PDR (Partial Drop
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage
| ... | of packets transmitted. NDR and PDR are discovered for different
| ... | Ethernet L2 frame sizes using either binary search or linear search
| ... | algorithms with configured starting rate and final step that determines
| ... | throughput measurement resolution. Test packets are generated by TG on
| ... | links to DUTs. TG traffic profile contains two L3 flow-groups
| ... | (flow-group per direction, 1M flows per flow-group) with all packets
| ... | containing Ethernet header, IPv4 header with IP protocol=61 and
| ... | static payload. MAC addresses match the MAC addresses of the TG
| ... | node interfaces. The IP.dst (IPv4 destination address) field is
| ... | incremented in both streams.
| ... | *[Ref] Applicable standard specifications:* RFC2544.
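#
# The suite documentation above describes the NDR/PDR discovery: the binary
# search narrows the [min_rate, max_rate] interval until the remaining step
# is smaller than ${threshold}; NDR accepts zero loss, PDR accepts up to
# ${glob_loss_acceptance} (${glob_loss_acceptance_type}). The keywords
# "Find NDR using binary search and pps" and "Find PDR using binary search
# and pps" come from performance.robot. A minimal illustrative sketch of such
# a search loop (helper names like measure_loss_ratio and allowed_loss are
# hypothetical, not part of this suite):
#
#   lo, hi = binary_min, binary_max            # pps search bounds
#   while hi - lo > threshold:                 # resolution, e.g. 100 kpps @ 64B
#       rate = (lo + hi) / 2
#       loss = measure_loss_ratio(rate, framesize, traffic_profile)
#       if loss <= allowed_loss:               # 0 for NDR, loss tolerance for PDR
#           lo = rate                          # rate sustained, search higher
#       else:
#           hi = rate                          # rate failed, search lower
#   # lo is the reported NDR/PDR throughput in packets per second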

*** Variables ***
| ${rts_per_flow}= | 1000000
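# ${rts_per_flow} is the number of IPv4 /32 routes (and flows) per direction:
# 1 000 000 routes per flow-group x 2 flow-groups (one per direction) gives
# the 2x1M FIB entries referenced in the test case names. The traffic profile
# 3-node-IPv4-dst-${rts_per_flow} increments IP.dst per flow, so each flow
# targets a distinct /32 entry. (Illustrative only: with a hypothetical base
# of 10.0.0.0 the destinations would be 10.0.0.0, 10.0.0.1, ...; the actual
# ranges are defined by the Scale IPv4 forwarding keyword and the TG traffic
# profile, not in this file.)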

*** Test Cases ***
| TC01: 64B NDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | NDR
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
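# The Given/And steps above render the VPP startup configuration before the
# traffic trial. An illustrative sketch of the resulting startup.conf
# fragment for this 1-thread/1-rxq variant (core numbers and PCI addresses
# are hypothetical; the real values come from the topology file). The
# 2-thread and 4-thread test cases below differ only in corelist-workers
# and num-rx-queues:
#
#   cpu {
#     main-core 1
#     corelist-workers 2
#   }
#   dpdk {
#     dev 0000:0a:00.0 { num-rx-queues 1 }
#     dev 0000:0a:00.1 { num-rx-queues 1 }
#     no-multi-seg
#   }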

| TC02: 64B PDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps, LT=0.5%.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC03: 1518B NDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | NDR
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC04: 1518B PDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps, LT=0.5%.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC05: 9000B NDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | NDR
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
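# Note: unlike the 64B and 1518B cases, the 9000B test cases omit the
# "Add No Multi Seg to all DUTs" step: 9000 Byte jumbo frames do not fit in
# a single DPDK mbuf, so multi-segment buffers must remain enabled for them.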

| TC06: 9000B PDR binary search - DUT IPv4 Fib 2x1M - 1thread 1core 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps, LT=0.5%.
| | [Tags] | 1_THREAD_NOHTT_RXQUEUES_1 | SINGLE_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '1' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC07: 64B NDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | NDR
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC08: 64B PDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps, LT=0.5%.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC09: 1518B NDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | NDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC10: 1518B PDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps, LT=0.5%.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC11: 9000B NDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find NDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | NDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC12: 9000B PDR binary search - DUT IPv4 Fib 2x1M - 2threads 2cores 1rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Find PDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps, LT=0.5%.
| | [Tags] | 2_THREAD_NOHTT_RXQUEUES_1 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '2' worker threads and rxqueues '1' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC13: 64B NDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find NDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | NDR
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC14: 64B PDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find PDR for 64 Byte frames
| | ... | using binary search starting at 10GE linerate, step 100kpps, LT=0.5%.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 64
| | ${min_rate}= | Set Variable | 100000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_64B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC15: 1518B NDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find NDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | NDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC16: 1518B PDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find PDR for 1518 Byte frames
| | ... | using binary search starting at 10GE linerate, step 10kpps, LT=0.5%.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 1518
| | ${min_rate}= | Set Variable | 10000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_1518B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Add No Multi Seg to all DUTs
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}

| TC17: 9000B NDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find NDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | NDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}

| TC18: 9000B PDR binary search - DUT IPv4 Fib 2x1M - 4threads 4cores 2rxq
| | [Documentation]
| | ... | [Cfg] DUT runs IPv4 routing config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Find PDR for 9000 Byte frames
| | ... | using binary search starting at 10GE linerate, step 5kpps, LT=0.5%.
| | [Tags] | 4_THREAD_NOHTT_RXQUEUES_2 | MULTI_THREAD | PDR | SKIP_PATCH
| | ${framesize}= | Set Variable | 9000
| | ${min_rate}= | Set Variable | 5000
| | ${max_rate}= | Set Variable | ${10Ge_linerate_pps_9000B}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | Given Add '4' worker threads and rxqueues '2' without HTT to all DUTs
| | And   Add PCI devices to DUTs from 3-node single link topology
| | And   Apply startup configuration on all VPP DUTs
| | And   Scale IPv4 forwarding initialized in a 3-node circular topology
| | ...   | ${rts_per_flow}
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ...                                       | ${binary_max}
| | ...                                       | 3-node-IPv4-dst-${rts_per_flow}
| | ...                                       | ${min_rate} | ${max_rate}
| | ...                                       | ${threshold}
| | ...                                       | ${glob_loss_acceptance}
| | ...                                       | ${glob_loss_acceptance_type}
_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, sourceIPv6Address, 16); f++; f->e_id_length = ipfix_e_id_length (0, postNATSourceIPv4Address, 4); f++; f->e_id_length = ipfix_e_id_length (0, protocolIdentifier, 1); f++; f->e_id_length = ipfix_e_id_length (0, sourceTransportPort, 2); f++; f->e_id_length = ipfix_e_id_length (0, postNAPTSourceTransportPort, 2); f++; f->e_id_length = ipfix_e_id_length (0, destinationIPv6Address, 16); f++; f->e_id_length = ipfix_e_id_length (0, postNATDestinationIPv4Address, 4); f++; f->e_id_length = ipfix_e_id_length (0, destinationTransportPort, 2); f++; f->e_id_length = ipfix_e_id_length (0, postNAPTDestinationTransportPort, 2); f++; f->e_id_length = ipfix_e_id_length (0, ingressVRFID, 4); f++; } else if (event == QUOTA_EXCEEDED) { if (quota_event == MAX_ENTRIES_PER_USER) { f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8); f++; f->e_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4); f++; f->e_id_length = ipfix_e_id_length (0, maxEntriesPerUser, 4); f++; f->e_id_length = ipfix_e_id_length (0, sourceIPv4Address, 4); f++; } else if (quota_event == MAX_SESSION_ENTRIES) { f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8); f++; f->e_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4); f++; f->e_id_length = ipfix_e_id_length (0, maxSessionEntries, 4); f++; } else if (quota_event == MAX_BIB_ENTRIES) { f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8); f++; f->e_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4); f++; f->e_id_length = ipfix_e_id_length (0, maxBIBEntries, 4); f++; } else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY) { f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8); f++; f->e_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4); f++; f->e_id_length = ipfix_e_id_length (0, maxFragmentsPendingReassembly, 4); f++; f->e_id_length = ipfix_e_id_length (0, sourceIPv4Address, 4); f++; } else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6) { f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8); f++; f->e_id_length = ipfix_e_id_length (0, natEvent, 1); f++; f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4); f++; f->e_id_length = ipfix_e_id_length (0, maxFragmentsPendingReassembly, 4); f++; f->e_id_length = ipfix_e_id_length (0, sourceIPv6Address, 16); f++; } } /* Back to the template packet... 
*/ ip = (ip4_header_t *) & tp->ip4; udp = (udp_header_t *) (ip + 1); ASSERT (f - first_field); /* Field count in this template */ t->id_count = ipfix_id_count (fr->template_id, f - first_field); /* set length in octets */ s->set_id_length = ipfix_set_id_length (2 /* set_id */ , (u8 *) f - (u8 *) s); /* message length in octets */ h->version_length = version_length ((u8 *) f - (u8 *) h); ip->length = clib_host_to_net_u16 ((u8 *) f - (u8 *) ip); ip->checksum = ip4_header_checksum (ip); return rewrite; } u8 * snat_template_rewrite_addr_exhausted (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, NAT_ADDRESSES_EXHAUTED, 0); } u8 * snat_template_rewrite_nat44_session (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, NAT44_SESSION_CREATE, 0); } u8 * snat_template_rewrite_max_entries_per_usr (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, QUOTA_EXCEEDED, MAX_ENTRIES_PER_USER); } u8 * nat_template_rewrite_max_sessions (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, QUOTA_EXCEEDED, MAX_SESSION_ENTRIES); } u8 * nat_template_rewrite_max_bibs (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, QUOTA_EXCEEDED, MAX_BIB_ENTRIES); } u8 * nat_template_rewrite_max_frags_ip4 (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, QUOTA_EXCEEDED, MAX_FRAGMENTS_PENDING_REASSEMBLY); } u8 * nat_template_rewrite_max_frags_ip6 (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, QUOTA_EXCEEDED, MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6); } u8 * nat_template_rewrite_nat64_bib (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, NAT64_BIB_CREATE, 0); } u8 * nat_template_rewrite_nat64_session (flow_report_main_t * frm, flow_report_t * fr, ip4_address_t * collector_address, ip4_address_t * src_address, u16 collector_port, 
ipfix_report_element_t *elts, u32 n_elts, u32 *stream_index) { return snat_template_rewrite (frm, fr, collector_address, src_address, collector_port, NAT64_SESSION_CREATE, 0); } static inline void snat_ipfix_header_create (flow_report_main_t * frm, vlib_buffer_t * b0, u32 * offset) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; flow_report_stream_t *stream; ip4_ipfix_template_packet_t *tp; ipfix_message_header_t *h = 0; ipfix_set_header_t *s = 0; u32 sequence_number; u32 stream_index; ip4_header_t *ip; udp_header_t *udp; stream_index = clib_atomic_fetch_or(&silm->stream_index, 0); stream = &frm->streams[stream_index]; b0->current_data = 0; b0->current_length = sizeof (*ip) + sizeof (*udp) + sizeof (*h) + sizeof (*s); b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT); vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index; tp = vlib_buffer_get_current (b0); ip = (ip4_header_t *) & tp->ip4; udp = (udp_header_t *) (ip + 1); h = (ipfix_message_header_t *) (udp + 1); s = (ipfix_set_header_t *) (h + 1); ip->ip_version_and_header_length = 0x45; ip->ttl = 254; ip->protocol = IP_PROTOCOL_UDP; ip->flags_and_fragment_offset = 0; ip->src_address.as_u32 = frm->src_address.as_u32; ip->dst_address.as_u32 = frm->ipfix_collector.as_u32; udp->src_port = clib_host_to_net_u16 (stream->src_port); udp->dst_port = clib_host_to_net_u16 (frm->collector_port); udp->checksum = 0; h->export_time = clib_host_to_net_u32 ((u32) (((f64) frm->unix_time_0) + (vlib_time_now (frm->vlib_main) - frm->vlib_time_0))); sequence_number = clib_atomic_fetch_add (&stream->sequence_number, 1); h->sequence_number = clib_host_to_net_u32 (sequence_number); h->domain_id = clib_host_to_net_u32 (stream->domain_id); *offset = (u32) (((u8 *) (s + 1)) - (u8 *) tp); } static inline void snat_ipfix_send (flow_report_main_t * frm, vlib_frame_t * f, vlib_buffer_t * b0, u16 template_id) { ip4_ipfix_template_packet_t *tp; ipfix_message_header_t *h = 0; ipfix_set_header_t *s = 0; ip4_header_t *ip; udp_header_t *udp; vlib_main_t *vm = frm->vlib_main; tp = vlib_buffer_get_current (b0); ip = (ip4_header_t *) & tp->ip4; udp = (udp_header_t *) (ip + 1); h = (ipfix_message_header_t *) (udp + 1); s = (ipfix_set_header_t *) (h + 1); s->set_id_length = ipfix_set_id_length (template_id, b0->current_length - (sizeof (*ip) + sizeof (*udp) + sizeof (*h))); h->version_length = version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp))); ip->length = clib_host_to_net_u16 (b0->current_length); ip->checksum = ip4_header_checksum (ip); udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip)); if (frm->udp_checksum) { udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip); if (udp->checksum == 0) udp->checksum = 0xffff; } ASSERT (ip->checksum == ip4_header_checksum (ip)); vlib_put_frame_to_node (vm, ip4_lookup_node.index, f); } static void snat_ipfix_logging_nat44_ses (u32 thread_index, u8 nat_event, u32 src_ip, u32 nat_src_ip, snat_protocol_t snat_proto, u16 src_port, u16 nat_src_port, u32 vrf_id, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 proto = ~0; u16 template_id; proto = snat_proto_to_ip_proto (snat_proto); now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += 
silm->milisecond_time_0; b0 = sitd->nat44_session_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->nat44_session_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->nat44_session_next_record_offset; } f = sitd->nat44_session_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->nat44_session_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &src_ip, sizeof (src_ip)); offset += sizeof (src_ip); clib_memcpy_fast (b0->data + offset, &nat_src_ip, sizeof (nat_src_ip)); offset += sizeof (nat_src_ip); clib_memcpy_fast (b0->data + offset, &proto, sizeof (proto)); offset += sizeof (proto); clib_memcpy_fast (b0->data + offset, &src_port, sizeof (src_port)); offset += sizeof (src_port); clib_memcpy_fast (b0->data + offset, &nat_src_port, sizeof (nat_src_port)); offset += sizeof (nat_src_port); clib_memcpy_fast (b0->data + offset, &vrf_id, sizeof (vrf_id)); offset += sizeof (vrf_id); b0->current_length += NAT44_SESSION_CREATE_LEN; } if (PREDICT_FALSE (do_flush || (offset + NAT44_SESSION_CREATE_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->nat44_session_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->nat44_session_frame = 0; sitd->nat44_session_buffer = 0; offset = 0; } sitd->nat44_session_next_record_offset = offset; } static void snat_ipfix_logging_addr_exhausted (u32 thread_index, u32 pool_id, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = NAT_ADDRESSES_EXHAUTED; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->addr_exhausted_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->addr_exhausted_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->addr_exhausted_next_record_offset; } f = sitd->addr_exhausted_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->addr_exhausted_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); 
clib_memcpy_fast (b0->data + offset, &pool_id, sizeof (pool_id)); offset += sizeof (pool_id); b0->current_length += NAT_ADDRESSES_EXHAUTED_LEN; } if (PREDICT_FALSE (do_flush || (offset + NAT_ADDRESSES_EXHAUTED_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->addr_exhausted_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->addr_exhausted_frame = 0; sitd->addr_exhausted_buffer = 0; offset = 0; } sitd->addr_exhausted_next_record_offset = offset; } static void snat_ipfix_logging_max_entries_per_usr (u32 thread_index, u32 limit, u32 src_ip, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = QUOTA_EXCEEDED; u32 quota_event = MAX_ENTRIES_PER_USER; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->max_entries_per_user_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->max_entries_per_user_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->max_entries_per_user_next_record_offset; } f = sitd->max_entries_per_user_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->max_entries_per_user_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &quota_event, sizeof (quota_event)); offset += sizeof (quota_event); clib_memcpy_fast (b0->data + offset, &limit, sizeof (limit)); offset += sizeof (limit); clib_memcpy_fast (b0->data + offset, &src_ip, sizeof (src_ip)); offset += sizeof (src_ip); b0->current_length += MAX_ENTRIES_PER_USER_LEN; } if (PREDICT_FALSE (do_flush || (offset + MAX_ENTRIES_PER_USER_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->max_entries_per_user_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->max_entries_per_user_frame = 0; sitd->max_entries_per_user_buffer = 0; offset = 0; } sitd->max_entries_per_user_next_record_offset = offset; } static void nat_ipfix_logging_max_ses (u32 thread_index, u32 limit, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = QUOTA_EXCEEDED; u32 quota_event = MAX_SESSION_ENTRIES; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->max_sessions_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); 
return; } b0 = sitd->max_sessions_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->max_sessions_next_record_offset; } f = sitd->max_sessions_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->max_sessions_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &quota_event, sizeof (quota_event)); offset += sizeof (quota_event); clib_memcpy_fast (b0->data + offset, &limit, sizeof (limit)); offset += sizeof (limit); b0->current_length += MAX_SESSIONS_LEN; } if (PREDICT_FALSE (do_flush || (offset + MAX_SESSIONS_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->max_sessions_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->max_sessions_frame = 0; sitd->max_sessions_buffer = 0; offset = 0; } sitd->max_sessions_next_record_offset = offset; } static void nat_ipfix_logging_max_bib (u32 thread_index, u32 limit, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = QUOTA_EXCEEDED; u32 quota_event = MAX_BIB_ENTRIES; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->max_bibs_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->max_bibs_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->max_bibs_next_record_offset; } f = sitd->max_bibs_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->max_bibs_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &quota_event, sizeof (quota_event)); offset += sizeof (quota_event); clib_memcpy_fast (b0->data + offset, &limit, sizeof (limit)); offset += sizeof (limit); b0->current_length += MAX_BIBS_LEN; } if (PREDICT_FALSE (do_flush || (offset + MAX_BIBS_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->max_bibs_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->max_bibs_frame = 0; sitd->max_bibs_buffer = 0; offset = 0; } sitd->max_bibs_next_record_offset = offset; } static void nat_ipfix_logging_max_frag_ip4 (u32 thread_index, u32 limit, u32 src, int do_flush) { 
snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = QUOTA_EXCEEDED; u32 quota_event = MAX_FRAGMENTS_PENDING_REASSEMBLY; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->max_frags_ip4_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->max_frags_ip4_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->max_frags_ip4_next_record_offset; } f = sitd->max_frags_ip4_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->max_frags_ip4_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &quota_event, sizeof (quota_event)); offset += sizeof (quota_event); clib_memcpy_fast (b0->data + offset, &limit, sizeof (limit)); offset += sizeof (limit); clib_memcpy_fast (b0->data + offset, &src, sizeof (src)); offset += sizeof (src); b0->current_length += MAX_FRAGMENTS_IP4_LEN; } if (PREDICT_FALSE (do_flush || (offset + MAX_BIBS_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->max_frags_ip4_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->max_frags_ip4_frame = 0; sitd->max_frags_ip4_buffer = 0; offset = 0; } sitd->max_frags_ip4_next_record_offset = offset; } static void nat_ipfix_logging_max_frag_ip6 (u32 thread_index, u32 limit, ip6_address_t * src, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u8 nat_event = QUOTA_EXCEEDED; u32 quota_event = MAX_FRAGMENTS_PENDING_REASSEMBLY; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->max_frags_ip6_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->max_frags_ip6_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->max_frags_ip6_next_record_offset; } f = sitd->max_frags_ip6_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->max_frags_ip6_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast 
(b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, &quota_event, sizeof (quota_event)); offset += sizeof (quota_event); clib_memcpy_fast (b0->data + offset, &limit, sizeof (limit)); offset += sizeof (limit); clib_memcpy_fast (b0->data + offset, src, sizeof (ip6_address_t)); offset += sizeof (ip6_address_t); b0->current_length += MAX_FRAGMENTS_IP6_LEN; } if (PREDICT_FALSE (do_flush || (offset + MAX_BIBS_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->max_frags_ip6_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->max_frags_ip6_frame = 0; sitd->max_frags_ip6_buffer = 0; offset = 0; } sitd->max_frags_ip6_next_record_offset = offset; } static void nat_ipfix_logging_nat64_bibe (u32 thread_index, u8 nat_event, ip6_address_t * src_ip, u32 nat_src_ip, u8 proto, u16 src_port, u16 nat_src_port, u32 vrf_id, int do_flush) { snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index]; flow_report_main_t *frm = &flow_report_main; vlib_frame_t *f; vlib_buffer_t *b0 = 0; u32 bi0 = ~0; u32 offset; vlib_main_t *vm = frm->vlib_main; u64 now; u16 template_id; now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3); now += silm->milisecond_time_0; b0 = sitd->nat64_bib_buffer; if (PREDICT_FALSE (b0 == 0)) { if (do_flush) return; if (vlib_buffer_alloc (vm, &bi0, 1) != 1) { nat_log_err ("can't allocate buffer for NAT IPFIX event"); return; } b0 = sitd->nat64_bib_buffer = vlib_get_buffer (vm, bi0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); offset = 0; } else { bi0 = vlib_get_buffer_index (vm, b0); offset = sitd->nat64_bib_next_record_offset; } f = sitd->nat64_bib_frame; if (PREDICT_FALSE (f == 0)) { u32 *to_next; f = vlib_get_frame_to_node (vm, ip4_lookup_node.index); sitd->nat64_bib_frame = f; to_next = vlib_frame_vector_args (f); to_next[0] = bi0; f->n_vectors = 1; } if (PREDICT_FALSE (offset == 0)) snat_ipfix_header_create (frm, b0, &offset); if (PREDICT_TRUE (do_flush == 0)) { u64 time_stamp = clib_host_to_net_u64 (now); clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp)); offset += sizeof (time_stamp); clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event)); offset += sizeof (nat_event); clib_memcpy_fast (b0->data + offset, src_ip, sizeof (ip6_address_t)); offset += sizeof (ip6_address_t); clib_memcpy_fast (b0->data + offset, &nat_src_ip, sizeof (nat_src_ip)); offset += sizeof (nat_src_ip); clib_memcpy_fast (b0->data + offset, &proto, sizeof (proto)); offset += sizeof (proto); clib_memcpy_fast (b0->data + offset, &src_port, sizeof (src_port)); offset += sizeof (src_port); clib_memcpy_fast (b0->data + offset, &nat_src_port, sizeof (nat_src_port)); offset += sizeof (nat_src_port); clib_memcpy_fast (b0->data + offset, &vrf_id, sizeof (vrf_id)); offset += sizeof (vrf_id); b0->current_length += NAT64_BIB_LEN; } if (PREDICT_FALSE (do_flush || (offset + NAT64_BIB_LEN) > frm->path_mtu)) { template_id = clib_atomic_fetch_or ( &silm->nat64_bib_template_id, 0); snat_ipfix_send (frm, f, b0, template_id); sitd->nat64_bib_frame = 0; sitd->nat64_bib_buffer = 0; offset = 0; } sitd->nat64_bib_next_record_offset = offset; } static void nat_ipfix_logging_nat64_ses (u32 thread_index, u8 nat_event, ip6_address_t * src_ip, u32 nat_src_ip, u8 proto, u16 src_port, u16 nat_src_port, ip6_address_t * dst_ip, u32 nat_dst_ip, 
static void
nat_ipfix_logging_nat64_ses (u32 thread_index, u8 nat_event,
                             ip6_address_t * src_ip, u32 nat_src_ip,
                             u8 proto, u16 src_port, u16 nat_src_port,
                             ip6_address_t * dst_ip, u32 nat_dst_ip,
                             u16 dst_port, u16 nat_dst_port,
                             u32 vrf_id, int do_flush)
{
  snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
  snat_ipfix_per_thread_data_t *sitd = &silm->per_thread_data[thread_index];
  flow_report_main_t *frm = &flow_report_main;
  vlib_frame_t *f;
  vlib_buffer_t *b0 = 0;
  u32 bi0 = ~0;
  u32 offset;
  vlib_main_t *vm = frm->vlib_main;
  u64 now;
  u16 template_id;

  now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
  now += silm->milisecond_time_0;

  b0 = sitd->nat64_ses_buffer;

  if (PREDICT_FALSE (b0 == 0))
    {
      if (do_flush)
        return;

      if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
        {
          nat_log_err ("can't allocate buffer for NAT IPFIX event");
          return;
        }

      b0 = sitd->nat64_ses_buffer = vlib_get_buffer (vm, bi0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      offset = 0;
    }
  else
    {
      bi0 = vlib_get_buffer_index (vm, b0);
      offset = sitd->nat64_ses_next_record_offset;
    }

  f = sitd->nat64_ses_frame;
  if (PREDICT_FALSE (f == 0))
    {
      u32 *to_next;
      f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
      sitd->nat64_ses_frame = f;
      to_next = vlib_frame_vector_args (f);
      to_next[0] = bi0;
      f->n_vectors = 1;
    }

  if (PREDICT_FALSE (offset == 0))
    snat_ipfix_header_create (frm, b0, &offset);

  if (PREDICT_TRUE (do_flush == 0))
    {
      u64 time_stamp = clib_host_to_net_u64 (now);
      clib_memcpy_fast (b0->data + offset, &time_stamp, sizeof (time_stamp));
      offset += sizeof (time_stamp);
      clib_memcpy_fast (b0->data + offset, &nat_event, sizeof (nat_event));
      offset += sizeof (nat_event);
      clib_memcpy_fast (b0->data + offset, src_ip, sizeof (ip6_address_t));
      offset += sizeof (ip6_address_t);
      clib_memcpy_fast (b0->data + offset, &nat_src_ip, sizeof (nat_src_ip));
      offset += sizeof (nat_src_ip);
      clib_memcpy_fast (b0->data + offset, &proto, sizeof (proto));
      offset += sizeof (proto);
      clib_memcpy_fast (b0->data + offset, &src_port, sizeof (src_port));
      offset += sizeof (src_port);
      clib_memcpy_fast (b0->data + offset, &nat_src_port,
                        sizeof (nat_src_port));
      offset += sizeof (nat_src_port);
      clib_memcpy_fast (b0->data + offset, dst_ip, sizeof (ip6_address_t));
      offset += sizeof (ip6_address_t);
      clib_memcpy_fast (b0->data + offset, &nat_dst_ip, sizeof (nat_dst_ip));
      offset += sizeof (nat_dst_ip);
      clib_memcpy_fast (b0->data + offset, &dst_port, sizeof (dst_port));
      offset += sizeof (dst_port);
      clib_memcpy_fast (b0->data + offset, &nat_dst_port,
                        sizeof (nat_dst_port));
      offset += sizeof (nat_dst_port);
      clib_memcpy_fast (b0->data + offset, &vrf_id, sizeof (vrf_id));
      offset += sizeof (vrf_id);
      b0->current_length += NAT64_SES_LEN;
    }

  if (PREDICT_FALSE (do_flush || (offset + NAT64_SES_LEN) > frm->path_mtu))
    {
      template_id = clib_atomic_fetch_or (&silm->nat64_ses_template_id, 0);
      snat_ipfix_send (frm, f, b0, template_id);
      sitd->nat64_ses_frame = 0;
      sitd->nat64_ses_buffer = 0;
      offset = 0;
    }

  sitd->nat64_ses_next_record_offset = offset;
}

void
snat_ipfix_flush (u32 thread_index)
{
  int do_flush = 1;

  snat_ipfix_logging_nat44_ses (thread_index, 0, 0, 0, 0, 0, 0, 0, do_flush);
  snat_ipfix_logging_addr_exhausted (thread_index, 0, do_flush);
  snat_ipfix_logging_max_entries_per_usr (thread_index, 0, 0, do_flush);
  nat_ipfix_logging_max_ses (thread_index, 0, do_flush);
  nat_ipfix_logging_max_bib (thread_index, 0, do_flush);
  nat_ipfix_logging_max_frag_ip4 (thread_index, 0, 0, do_flush);
  nat_ipfix_logging_max_frag_ip6 (thread_index, 0, 0, do_flush);
  nat_ipfix_logging_nat64_bibe (thread_index, 0, 0, 0, 0, 0, 0, 0, do_flush);
  nat_ipfix_logging_nat64_ses (thread_index, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               do_flush);
}

void
snat_ipfix_flush_from_main (void)
{
  snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
  vlib_main_t *worker_vm;
  int i;

  if (PREDICT_TRUE (!clib_atomic_fetch_or (&silm->enabled, 0)))
    return;

  if (PREDICT_FALSE (!silm->worker_vms))
    {
      for (i = 1; i < vec_len (vlib_mains); i++)
        {
          worker_vm = vlib_mains[i];
          if (worker_vm)
            vec_add1 (silm->worker_vms, worker_vm);
        }
    }

  /* Trigger flush for each worker thread */
  for (i = 0; i < vec_len (silm->worker_vms); i++)
    {
      worker_vm = silm->worker_vms[i];
      if (worker_vm)
        vlib_node_set_interrupt_pending (worker_vm,
                                         snat_ipfix_flush_node.index);
    }

  /* Finally flush main thread */
  snat_ipfix_flush (0);
}

/**
 * @brief Generate NAT44 session create event
 *
 * @param thread_index thread index
 * @param src_ip source IPv4 address
 * @param nat_src_ip translated source IPv4 address
 * @param snat_proto NAT transport protocol
 * @param src_port source port
 * @param nat_src_port translated source port
 * @param vrf_id VRF ID
 */
void
snat_ipfix_logging_nat44_ses_create (u32 thread_index,
                                     u32 src_ip, u32 nat_src_ip,
                                     snat_protocol_t snat_proto,
                                     u16 src_port, u16 nat_src_port,
                                     u32 vrf_id)
{
  skip_if_disabled ();

  snat_ipfix_logging_nat44_ses (thread_index, NAT44_SESSION_CREATE, src_ip,
                                nat_src_ip, snat_proto, src_port,
                                nat_src_port, vrf_id, 0);
}

/**
 * @brief Generate NAT44 session delete event
 *
 * @param thread_index thread index
 * @param src_ip source IPv4 address
 * @param nat_src_ip translated source IPv4 address
 * @param snat_proto NAT transport protocol
 * @param src_port source port
 * @param nat_src_port translated source port
 * @param vrf_id VRF ID
 */
void
snat_ipfix_logging_nat44_ses_delete (u32 thread_index,
                                     u32 src_ip, u32 nat_src_ip,
                                     snat_protocol_t snat_proto,
                                     u16 src_port, u16 nat_src_port,
                                     u32 vrf_id)
{
  skip_if_disabled ();

  snat_ipfix_logging_nat44_ses (thread_index, NAT44_SESSION_DELETE, src_ip,
                                nat_src_ip, snat_proto, src_port,
                                nat_src_port, vrf_id, 0);
}

/**
 * @brief Generate NAT addresses exhausted event
 *
 * @param thread_index thread index
 * @param pool_id NAT pool ID
 */
void
snat_ipfix_logging_addresses_exhausted (u32 thread_index, u32 pool_id)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  snat_ipfix_logging_addr_exhausted (thread_index, pool_id, 0);
}

/**
 * @brief Generate maximum entries per user exceeded event
 *
 * @param thread_index thread index
 * @param limit maximum NAT entries that can be created per user
 * @param src_ip source IPv4 address
 */
void
snat_ipfix_logging_max_entries_per_user (u32 thread_index,
                                         u32 limit, u32 src_ip)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  snat_ipfix_logging_max_entries_per_usr (thread_index, limit, src_ip, 0);
}

vlib_frame_t *
deterministic_nat_data_callback (flow_report_main_t * frm,
                                 flow_report_t * fr,
                                 vlib_frame_t * f,
                                 u32 * to_next, u32 node_index)
{
  snat_ipfix_flush_from_main ();

  return f;
}

/**
 * @brief Generate maximum session entries exceeded event
 *
 * @param thread_index thread index
 * @param limit configured limit
 */
void
nat_ipfix_logging_max_sessions (u32 thread_index, u32 limit)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  nat_ipfix_logging_max_ses (thread_index, limit, 0);
}

/**
 * @brief Generate maximum BIB entries exceeded event
 *
 * @param thread_index thread index
 * @param limit configured limit
 */
void
nat_ipfix_logging_max_bibs (u32 thread_index, u32 limit)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  nat_ipfix_logging_max_bib (thread_index, limit, 0);
}

/**
 * @brief Generate maximum IPv4 fragments pending reassembly exceeded event
 *
 * @param thread_index thread index
 * @param limit configured limit
 * @param src source IPv4 address
 */
void
nat_ipfix_logging_max_fragments_ip4 (u32 thread_index,
                                     u32 limit, ip4_address_t * src)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  nat_ipfix_logging_max_frag_ip4 (thread_index, limit, src->as_u32, 0);
}

/**
 * @brief Generate maximum IPv6 fragments pending reassembly exceeded event
 *
 * @param thread_index thread index
 * @param limit configured limit
 * @param src source IPv6 address
 */
void
nat_ipfix_logging_max_fragments_ip6 (u32 thread_index,
                                     u32 limit, ip6_address_t * src)
{
  //TODO: This event SHOULD be rate limited
  skip_if_disabled ();
  nat_ipfix_logging_max_frag_ip6 (thread_index, limit, src, 0);
}

/**
 * @brief Generate NAT64 BIB create and delete events
 *
 * @param thread_index thread index
 * @param src_ip source IPv6 address
 * @param nat_src_ip translated source IPv4 address
 * @param proto L4 protocol
 * @param src_port source port
 * @param nat_src_port translated source port
 * @param vrf_id VRF ID
 * @param is_create non-zero for a create event, zero for a delete event
 */
void
nat_ipfix_logging_nat64_bib (u32 thread_index, ip6_address_t * src_ip,
                             ip4_address_t * nat_src_ip, u8 proto,
                             u16 src_port, u16 nat_src_port, u32 vrf_id,
                             u8 is_create)
{
  u8 nat_event;

  skip_if_disabled ();

  nat_event = is_create ? NAT64_BIB_CREATE : NAT64_BIB_DELETE;

  nat_ipfix_logging_nat64_bibe (thread_index, nat_event, src_ip,
                                nat_src_ip->as_u32, proto, src_port,
                                nat_src_port, vrf_id, 0);
}

/**
 * @brief Generate NAT64 session create and delete events
 *
 * @param thread_index thread index
 * @param src_ip source IPv6 address
 * @param nat_src_ip translated source IPv4 address
 * @param proto L4 protocol
 * @param src_port source port
 * @param nat_src_port translated source port
 * @param dst_ip destination IPv6 address
 * @param nat_dst_ip translated destination IPv4 address
 * @param dst_port destination port
 * @param nat_dst_port translated destination port
 * @param vrf_id VRF ID
 * @param is_create non-zero for a create event, zero for a delete event
 */
void
nat_ipfix_logging_nat64_session (u32 thread_index,
                                 ip6_address_t * src_ip,
                                 ip4_address_t * nat_src_ip, u8 proto,
                                 u16 src_port, u16 nat_src_port,
                                 ip6_address_t * dst_ip,
                                 ip4_address_t * nat_dst_ip, u16 dst_port,
                                 u16 nat_dst_port, u32 vrf_id, u8 is_create)
{
  u8 nat_event;

  skip_if_disabled ();

  nat_event = is_create ? NAT64_SESSION_CREATE : NAT64_SESSION_DELETE;

  nat_ipfix_logging_nat64_ses (thread_index, nat_event, src_ip,
                               nat_src_ip->as_u32, proto, src_port,
                               nat_src_port, dst_ip, nat_dst_ip->as_u32,
                               dst_port, nat_dst_port, vrf_id, 0);
}

vlib_frame_t *
data_callback (flow_report_main_t * frm, flow_report_t * fr,
               vlib_frame_t * f, u32 * to_next, u32 node_index)
{
  snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;

  if (PREDICT_FALSE (++silm->call_counter >= vec_len (frm->reports)))
    {
      snat_ipfix_flush_from_main ();
      silm->call_counter = 0;
    }

  return f;
}

/**
 * @brief Enable/disable NAT plugin IPFIX logging
 *
 * @param enable 1 to enable, 0 to disable
 * @param domain_id observation domain ID
 * @param src_port source port number
 *
 * @returns 0 on success
 */
int
snat_ipfix_logging_enable_disable (int enable, u32 domain_id, u16 src_port)
{
  snat_main_t *sm = &snat_main;
  snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
  flow_report_main_t *frm = &flow_report_main;
  vnet_flow_report_add_del_args_t a;
  int rv;
  u8 e = enable ? 1 : 0;

  if (clib_atomic_cmp_and_swap (&silm->enabled, e ^ 1, e) == e)
    return 0;

  clib_memset (&a, 0, sizeof (a));
  a.is_add = enable;
  a.domain_id = domain_id ? domain_id : 1;
  a.src_port = src_port ? src_port : UDP_DST_PORT_ipfix;
  a.flow_data_callback = data_callback;

  if (sm->deterministic)
    {
      a.rewrite_callback = snat_template_rewrite_max_entries_per_usr;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }
    }
  else
    {
      a.rewrite_callback = snat_template_rewrite_nat44_session;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = snat_template_rewrite_addr_exhausted;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_max_sessions;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_max_bibs;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_max_frags_ip4;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_max_frags_ip6;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_nat64_bib;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      a.rewrite_callback = nat_template_rewrite_nat64_session;
      rv = vnet_flow_report_add_del (frm, &a, NULL);
      if (rv)
        {
          nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
          return -1;
        }

      if (sm->endpoint_dependent)
        {
          a.rewrite_callback = snat_template_rewrite_max_entries_per_usr;
          rv = vnet_flow_report_add_del (frm, &a, NULL);
          if (rv)
            {
              nat_log_warn ("vnet_flow_report_add_del returned %d", rv);
              return -1;
            }
        }
    }

  return 0;
}

/**
 * @brief Initialize NAT plugin IPFIX logging
 *
 * @param vm vlib main
 */
void
snat_ipfix_logging_init (vlib_main_t * vm)
{
  snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  silm->enabled = 0;
  silm->worker_vms = 0;
  silm->call_counter = 0;

  /* Set up time reference pair */
  silm->vlib_time_0 = vlib_time_now (vm);
  silm->milisecond_time_0 = unix_time_now_nsec () * 1e-6;

  vec_validate (silm->per_thread_data, tm->n_vlib_mains - 1);
}

static uword
ipfix_flush_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                     vlib_frame_t * f)
{
  snat_ipfix_flush (vm->thread_index);
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_ipfix_flush_node) = {
  .function = ipfix_flush_process,
  .name = "snat-ipfix-flush",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
};
/* *INDENT-ON* */
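Editor's note: the sketch below is illustrative only and is not part of the original file. It shows, under stated assumptions, how a caller might drive this logging path: enable IPFIX export, emit one NAT44 session-create event from a worker, and push buffered records to the collector. The addresses, ports, domain ID and the SNAT_PROTOCOL_TCP protocol value are example assumptions; only the functions defined in this file are used.

/* Illustrative usage sketch (not part of this file). Assumes it lives in the
   same translation unit (or includes the NAT plugin headers) and that a valid
   worker thread_index is available. All literal values are made up. */
static void
nat_ipfix_usage_example (u32 thread_index)
{
  /* example addresses/ports in network byte order */
  u32 src_ip = clib_host_to_net_u32 (0x0a000001);       /* 10.0.0.1 */
  u32 nat_src_ip = clib_host_to_net_u32 (0xc0000201);   /* 192.0.2.1 */
  u16 src_port = clib_host_to_net_u16 (12345);
  u16 nat_src_port = clib_host_to_net_u16 (1024);

  /* enable logging with observation domain 1 and the default IPFIX port */
  if (snat_ipfix_logging_enable_disable (1 /* enable */, 1 /* domain */, 0))
    return;

  /* emit a NAT44 session-create record from this worker thread;
     SNAT_PROTOCOL_TCP is assumed to be the plugin's TCP protocol value */
  snat_ipfix_logging_nat44_ses_create (thread_index, src_ip, nat_src_ip,
                                       SNAT_PROTOCOL_TCP, src_port,
                                       nat_src_port, 0 /* vrf_id */);

  /* push any buffered records out to the collector on all threads */
  snat_ipfix_flush_from_main ();
}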