/*
Itay Marom
Hanoh Haim
Cisco Systems, Inc.
*/
/*
Copyright (c) 2015-2015 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef __TREX_STREAM_NODE_H__
#define __TREX_STREAM_NODE_H__
#include <bp_sim.h>
#include <stdio.h>
class TrexStatelessDpCore;
class TrexStatelessDpPerPort;
#include <trex_stream.h>
class TrexStatelessCpToDpMsgBase;
class CFlowGenListPerThread;
struct CGenNodeCommand : public CGenNodeBase {
friend class TrexStatelessDpCore;
public:
TrexStatelessCpToDpMsgBase * m_cmd;
uint8_t m_pad_end[104];
public:
void free_command();
} __rte_cache_aligned;
static_assert(sizeof(CGenNodeCommand) == sizeof(CGenNode), "sizeof(CGenNodeCommand) != sizeof(CGenNode)" );
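/* All node flavors below are carved from the same CGenNode pool and cast back
   and forth by the scheduler, so every derived layout is padded (m_pad_end)
   to exactly sizeof(CGenNode); the static_assert above, and the ones further
   down, enforce this at compile time. */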
struct CGenNodeCacheMbuf {
rte_mbuf_t * m_mbuf_const;
rte_mbuf_t * m_array[0];
public:
static uint32_t get_object_size(uint32_t size){
return ( sizeof(CGenNodeCacheMbuf) + sizeof(rte_mbuf_t *) * size );
}
};
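/* Illustrative allocation sketch (not part of the API, names are local):
   because m_array is a flexible array member, the object must be allocated
   with room for 'size' extra mbuf pointers, e.g.

       uint16_t size = 8;
       CGenNodeCacheMbuf *p =
           (CGenNodeCacheMbuf *)malloc(CGenNodeCacheMbuf::get_object_size(size));
       p->m_mbuf_const = NULL;
       for (uint16_t i = 0; i < size; i++) {
           p->m_array[i] = NULL;
       }

   in practice this is done by CGenNodeStateless::cache_mbuf_array_alloc()
   declared further down. */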
/* this is an event node for stateless traffic */
struct CGenNodeStateless : public CGenNodeBase {
friend class TrexStatelessDpCore;
public:
/* flags MASKS*/
enum {
SL_NODE_FLAGS_DIR =1, //USED by master
SL_NODE_FLAGS_MBUF_CACHE =2, //USED by master
SL_NODE_CONST_MBUF =4,
SL_NODE_VAR_PKT_SIZE = 8,
SL_NODE_STATS_NEEDED = 0x10,
SL_NODE_CONST_MBUF_CACHE_ARRAY = 0x20 /* array of mbuf - cache */
};
enum {
ss_FREE_RESUSE =1, /* should be freed by the scheduler */
ss_INACTIVE =2, /* will be activated by another stream, or stopped */
ss_ACTIVE =3 /* the stream is active */
};
typedef uint8_t stream_state_t ;
static std::string get_stream_state_str(stream_state_t stream_state);
private:
/******************************/
/* cache line 0 */
/* important stuff here R/W */
/******************************/
void * m_cache_mbuf; /* could be an array or a single mbuf */
double m_next_time_offset; /* in sec */
uint16_t m_action_counter;
uint8_t m_stat_hw_id; // hw id used to count rx and tx stats
uint8_t m_null_stream;
uint16_t m_cache_array_cnt;
uint16_t m_pad12;
stream_state_t m_state;
uint8_t m_port_id;
uint8_t m_stream_type; /* see TrexStream::STREAM_TYPE ,stream_type_t */
uint8_t m_pause;
uint32_t m_single_burst; /* the number of bursts in case of burst */
uint32_t m_single_burst_refill;
uint32_t m_multi_bursts; /* in case of multi_burst how many bursts */
/******************************/
/* cache line 1
this cache line should be READ-ONLY! write to it only at init time */
/******************************/
TrexStream * m_ref_stream_info; /* the stream info */
CGenNodeStateless * m_next_stream;
uint8_t * m_original_packet_data_prefix; /* pointer to the original packet prefix (64/128/512 bytes) */
/* Fast Field VM section */
uint8_t * m_vm_flow_var; /* pointer to the vm flow var */
uint8_t * m_vm_program; /* pointer to the program */
uint16_t m_vm_program_size; /* up to 64K op codes */
uint16_t m_cache_size; /*RO*/ /* the size of the mbuf array */
uint8_t m_batch_size; /*RO*/ /* the batch size */
uint8_t m_pad4;
uint16_t m_pad5;
/* End Fast Field VM Section */
/* pad to match the size of CGenNode */
uint8_t m_pad_end[20];
public:
uint8_t get_port_id(){
return (m_port_id);
}
/**
* rescale the inter-packet time offset
* according to the rate multiplier
*
*/
void update_rate(double factor) {
/* update the inter packet gap */
m_next_time_offset = m_next_time_offset / factor;
}
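/* Worked example (illustrative numbers): a stream running at 1000 PPS has
   m_next_time_offset == 1 msec; update_rate(2.0) shrinks the offset to
   0.5 msec, i.e. the effective rate doubles to 2000 PPS. */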
/* when restarting the stream, schedule it using the stream ISG */
inline void update_refresh_time(double cur_time){
m_time = cur_time + usec_to_sec(m_ref_stream_info->m_isg_usec) + m_ref_stream_info->m_mc_phase_pre_sec;
}
inline bool is_mask_for_free(){
return (get_state() == CGenNodeStateless::ss_FREE_RESUSE ?true:false);
}
inline void mark_for_free(){
set_state(CGenNodeStateless::ss_FREE_RESUSE);
/* only to be safe */
m_ref_stream_info= NULL;
m_next_stream= NULL;
}
bool is_pause(){
return (m_pause==1?true:false);
}
void set_pause(bool enable){
if ( enable ){
m_pause=1;
}else{
m_pause=0;
}
}
bool is_node_active() {
/* a single bitwise OR is faster than two separate branches */
return ((m_pause | m_null_stream) == 0);
}
inline uint8_t get_stream_type(){
return (m_stream_type);
}
inline uint32_t get_single_burst_cnt(){
return (m_single_burst);
}
inline double get_multi_ibg_sec(){
return (usec_to_sec(m_ref_stream_info->m_ibg_usec));
}
inline uint32_t get_multi_burst_cnt(){
return (m_multi_bursts);
}
inline void set_state(stream_state_t new_state){
m_state=new_state;
}
inline stream_state_t get_state() {
return m_state;
}
void refresh();
inline void handle_continues(CFlowGenListPerThread *thread) {
if (likely (is_node_active())) {
thread->m_node_gen.m_v_if->send_node( (CGenNode *)this);
}
/* continuous stream - schedule the next packet */
m_time += m_next_time_offset;
/* insert a new event */
thread->m_node_gen.m_p_queue.push( (CGenNode *)this);
}
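/* Note: a paused or null stream still reschedules itself every
   m_next_time_offset seconds - only the actual send is skipped - so resuming
   the stream does not require re-inserting the node into the scheduler. */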
inline void handle_multi_burst(CFlowGenListPerThread *thread) {
if (likely (is_node_active())) {
thread->m_node_gen.m_v_if->send_node( (CGenNode *)this);
}
m_single_burst--;
if (m_single_burst > 0 ) {
/* still within the current burst - schedule the next packet */
m_time += m_next_time_offset;
thread->m_node_gen.m_p_queue.push( (CGenNode *)this);
}else{
m_multi_bursts--;
if ( m_multi_bursts == 0 ) {
set_state(CGenNodeStateless::ss_INACTIVE);
if ( thread->set_stateless_next_node(this,m_next_stream) ){
/* update the next stream time using isg and post phase */
m_next_stream->update_refresh_time(m_time + m_ref_stream_info->get_next_stream_delay_sec());
thread->m_node_gen.m_p_queue.push( (CGenNode *)m_next_stream);
}else{
// in case of zero we will schedule a command to stop
// will be called from set_stateless_next_node
}
}else{
/* next burst is like starting a new stream - add pre and post phase */
m_time += m_ref_stream_info->get_next_burst_delay_sec();
m_single_burst = m_single_burst_refill;
thread->m_node_gen.m_p_queue.push( (CGenNode *)this);
}
}
}
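/* Worked trace (illustrative numbers, assuming m_single_burst starts at the
   refill value): with m_single_burst_refill == 3 and m_multi_bursts == 2 this
   handler runs 6 times; after each group of 3 packets the node sleeps for the
   inter-burst gap (get_next_burst_delay_sec()), and after the last burst it
   goes ss_INACTIVE and the next stream, if any, is scheduled through
   set_stateless_next_node(). */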
/**
* main entry point for handling a packet TX event
*
*/
inline void handle(CFlowGenListPerThread *thread) {
if (m_stream_type == TrexStream::stCONTINUOUS ) {
handle_continues(thread) ;
}else{
if (m_stream_type == TrexStream::stMULTI_BURST) {
handle_multi_burst(thread);
}else{
assert(0);
}
}
}
void set_socket_id(socket_id_t socket){
m_socket_id=socket;
}
socket_id_t get_socket_id(){
return ( m_socket_id );
}
void set_stat_hw_id(uint16_t hw_id) {
m_stat_hw_id = hw_id;
}
uint8_t get_stat_hw_id() {
return ( m_stat_hw_id );
}
inline void set_stat_needed() {
m_flags |= SL_NODE_STATS_NEEDED;
}
inline bool is_stat_needed() {
return ((m_flags & SL_NODE_STATS_NEEDED) != 0);
}
inline void set_mbuf_cache_dir(pkt_dir_t dir){
if (dir) {
m_flags |=NODE_FLAGS_DIR;
}else{
m_flags &=~NODE_FLAGS_DIR;
}
}
inline pkt_dir_t get_mbuf_cache_dir(){
return ((pkt_dir_t)( m_flags &1));
}
inline void set_cache_mbuf(rte_mbuf_t * m){
m_cache_mbuf=(void *)m;
m_flags |= NODE_FLAGS_MBUF_CACHE;
}
inline rte_mbuf_t * get_cache_mbuf(){
if ( m_flags & NODE_FLAGS_MBUF_CACHE ) {
return ((rte_mbuf_t *)m_cache_mbuf);
}else{
return ((rte_mbuf_t *)0);
}
}
inline void set_var_pkt_size(){
m_flags |= SL_NODE_VAR_PKT_SIZE;
}
inline bool is_var_pkt_size(){
return ( ( m_flags &SL_NODE_VAR_PKT_SIZE )?true:false);
}
inline void set_const_mbuf(rte_mbuf_t * m){
m_cache_mbuf=(void *)m;
m_flags |= SL_NODE_CONST_MBUF;
}
inline rte_mbuf_t * get_const_mbuf(){
if ( m_flags &SL_NODE_CONST_MBUF ) {
return ((rte_mbuf_t *)m_cache_mbuf);
}else{
return ((rte_mbuf_t *)0);
}
}
void clear_const_mbuf(){
m_flags= ( m_flags & ~SL_NODE_CONST_MBUF );
}
/* the prefix header exists only in non-cache mode; its size is 64/128/512 bytes, other sizes are not possible right now */
inline void alloc_prefix_header(uint16_t size){
set_prefix_header_size(size);
m_original_packet_data_prefix = (uint8_t *)malloc(size);
assert(m_original_packet_data_prefix);
}
inline void free_prefix_header(){
if (m_original_packet_data_prefix) {
free(m_original_packet_data_prefix);
m_original_packet_data_prefix=0;
}
}
/* the prefix header size could be 64/128/512; it is stashed in the base node's m_src_port field */
inline void set_prefix_header_size(uint16_t size){
m_src_port=size;
}
inline uint16_t prefix_header_size(){
return (m_src_port);
}
rte_mbuf_t * alloc_flow_stat_mbuf(rte_mbuf_t *m, struct flow_stat_payload_header * &fsp_head
, bool is_const);
bool alloc_flow_stat_mbuf_test_const();
rte_mbuf_t * alloc_node_with_vm();
void free_stl_node();
protected:
void free_stl_vm_buf();
public:
void cache_mbuf_array_init();
inline bool is_cache_mbuf_array(){
return ( m_flags & SL_NODE_CONST_MBUF_CACHE_ARRAY ? true:false );
}
void cache_mbuf_array_copy(CGenNodeCacheMbuf *obj,uint16_t size);
rte_mbuf_t ** cache_mbuf_array_alloc(uint16_t size);
void cache_mbuf_array_free();
void cache_mbuf_array_set(uint16_t index,rte_mbuf_t * m);
void cache_mbuf_array_set_const_mbuf(rte_mbuf_t * m);
rte_mbuf_t * cache_mbuf_array_get_const_mbuf();
rte_mbuf_t * cache_mbuf_array_get(uint16_t index);
rte_mbuf_t * cache_mbuf_array_get_cur(void){
CGenNodeCacheMbuf *p =(CGenNodeCacheMbuf *) m_cache_mbuf;
rte_mbuf_t * m=p->m_array[m_cache_array_cnt];
assert(m);
m_cache_array_cnt++;
if (m_cache_array_cnt == m_cache_size) {
m_cache_array_cnt=0;
}
return m;
}
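/* Illustrative trace (assuming m_cache_size == 3): consecutive calls return
   m_array[0], m_array[1], m_array[2] and then wrap back to m_array[0],
   cycling round-robin over the pre-built cached mbufs. */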
public:
/* debug functions */
int get_stream_id();
static void DumpHeader(FILE *fd);
void Dump(FILE *fd);
private:
void generate_random_seed();
void refresh_vm_bss();
void set_random_seed(uint32_t seed){
uint32_t *p=get_random_bss_seed_memory();
*p=seed;
}
uint32_t* get_random_bss_seed_memory(){
return (uint32_t*)m_vm_flow_var;/* always the first 4 bytes */
}
} __rte_cache_aligned;
static_assert(sizeof(CGenNodeStateless) == sizeof(CGenNode), "sizeof(CGenNodeStateless) != sizeof(CGenNode)" );
/* this is an event node for transmitting packets from a PCAP file */
struct CGenNodePCAP : public CGenNodeBase {
friend class TrexStatelessDpPerPort;
public:
/**
* creates a node from a PCAP file
*/
bool create(uint8_t port_id,
pkt_dir_t dir,
socket_id_t socket_id,
const uint8_t *mac_addr,
const uint8_t *slave_mac_addr,
const std::string &pcap_filename,
double ipg_usec,
double speedup,
uint32_t count,
bool is_dual);
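/* Hypothetical usage sketch (all values are illustrative; obtaining the node
   from the per-thread pool is not shown):

       CGenNodePCAP *node = ...;
       bool rc = node->create(0,            // port id
                              0,            // dir
                              0,            // socket id
                              mac,          // this port's MAC pair (12 bytes)
                              slave_mac,    // peer port's MAC pair (dual mode)
                              "cap.pcap",   // pcap file to replay
                              10.0,         // fixed IPG in usec
                              1.0,          // speedup
                              1,            // replay the file once
                              false);       // not dual mode

   this would replay "cap.pcap" once on port 0 with a fixed 10 usec
   inter-packet gap. */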
/**
* destroy the node cleaning up any data
*
*/
void destroy();
bool is_dual() const {
return m_is_dual;
}
/**
* advance - will read the next packet
*
* @author imarom (03-May-16)
*/
void next() {
assert(is_active());
/* save the previous packet time */
m_last_pkt_time = m_raw_packet->get_time();
/* advance */
if ( m_reader->ReadPacket(m_raw_packet) == false ){
m_count--;
/* if it's the end - go home... */
if (m_count == 0) {
m_state = PCAP_INACTIVE;
return;
}
/* rewind and load the first packet */
m_reader->Rewind();
if (!m_reader->ReadPacket(m_raw_packet)) {
m_state = PCAP_INACTIVE;
return;
}
}
/* update the packet dir if needed */
update_pkt_dir();
}
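/* Illustrative trace (assuming create() pre-loads the first packet): with
   m_count == 2 and a 3-packet pcap, packets 1,2,3 are served, EOF decrements
   the pass count and rewinds, packets 1,2,3 are served again, and the second
   EOF drives the node to PCAP_INACTIVE. */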
inline void update_pkt_dir() {
/* if dual mode and the interface is odd - swap the dir */
if (is_dual()) {
pkt_dir_t dir = (m_raw_packet->getInterface() & 0x1) ? (m_dir ^ 0x1) : m_dir;
set_mbuf_dir(dir);
}
}
/**
* return the inter-packet gap used to schedule the next packet
*
*/
inline double get_ipg() {
assert(m_state != PCAP_INVALID);
/* fixed IPG */
if (m_ipg_sec != -1) {
return m_ipg_sec;
} else {
return ((m_raw_packet->get_time() - m_last_pkt_time) / m_speedup);
}
}
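/* Worked example (illustrative numbers): with no fixed IPG (m_ipg_sec == -1),
   two consecutive pcap records time-stamped 1.000 sec and 1.010 sec replayed
   with m_speedup == 2.0 yield an inter-packet gap of
   (1.010 - 1.000) / 2.0 = 5 msec. */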
/**
* get the current packet as MBUF
*
*/
inline rte_mbuf_t *get_pkt() {
assert(m_state != PCAP_INVALID);
rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc( get_socket_id(), m_raw_packet->getTotalLen());
assert(m);
char *p = rte_pktmbuf_append(m, m_raw_packet->getTotalLen());
assert(p);
/* copy the packet */
memcpy(p, m_raw_packet->raw, m_raw_packet->getTotalLen());
/* fix the MAC */
if (get_mbuf_dir() == m_dir) {
memcpy(p, m_mac_addr, 12);
} else {
memcpy(p, m_slave_mac_addr, 12);
}
return (m);
}
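/* Note: the 12-byte memcpy above overwrites both the destination and source
   MAC of the replayed packet, using the pre-built pair for this port or, when
   the packet travels in the opposite direction in dual mode, the slave pair. */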
inline void handle(CFlowGenListPerThread *thread) {
assert(m_state != PCAP_INVALID);
thread->m_node_gen.m_v_if->send_node( (CGenNode *)this);
// read the next packet
next();
if (is_active()) {
m_time += get_ipg();
thread->m_node_gen.m_p_queue.push((CGenNode *)this);
} else {
thread->stop_stateless_traffic(get_port_id());
}
}
void set_mbuf_dir(pkt_dir_t dir) {
if (dir) {
m_flags |=NODE_FLAGS_DIR;
}else{
m_flags &=~NODE_FLAGS_DIR;
}
}
inline pkt_dir_t get_mbuf_dir(){
return ((pkt_dir_t)( m_flags &1));
}
uint8_t get_port_id() {
return m_port_id;
}
void mark_for_free() {
m_state = PCAP_MARKED_FOR_FREE;
}
bool is_active() {
return (m_state == PCAP_ACTIVE);
}
bool is_marked_for_free() {
return (m_state == PCAP_MARKED_FOR_FREE);
}
private:
enum {
PCAP_INVALID = 0,
PCAP_ACTIVE,
PCAP_INACTIVE,
PCAP_MARKED_FOR_FREE
};
/* cache line 0 */
/* important stuff here */
uint8_t m_mac_addr[12];
uint8_t m_slave_mac_addr[12];
uint8_t m_state;
pkt_dir_t m_dir;
double m_last_pkt_time;
double m_speedup;
double m_ipg_sec;
uint32_t m_count;
double m_next_time_offset; /* in sec */
CCapReaderBase *m_reader;
CCapPktRaw *m_raw_packet;
uint8_t m_port_id;
bool m_is_dual;
/* pad to match the size of CGenNode */
uint8_t m_pad_end[19];
} __rte_cache_aligned;
static_assert(sizeof(CGenNodePCAP) == sizeof(CGenNode), "sizeof(CGenNodePCAP) != sizeof(CGenNode)" );
#endif /* __TREX_STREAM_NODE_H__ */