From ef209c347bfb99ebe03a0ff2c7947a6ebbe03baf Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 25 Jun 2015 10:51:15 +0300 Subject: first public version --- b | 6 + images/40425209_l.jpg | Bin 0 -> 340883 bytes images/Intel520.png | Bin 0 -> 34642 bytes images/T-Rex_vm.png | Bin 0 -> 586111 bytes images/Thumbs.db | Bin 0 -> 84992 bytes images/TrexConfig.png | Bin 0 -> 57986 bytes images/TrexConfig_switch.png | Bin 0 -> 75038 bytes images/TrexViewer.png | Bin 0 -> 65175 bytes images/cisco.png | Bin 0 -> 3330 bytes images/dns_wireshark.png | Bin 0 -> 12271 bytes images/icons/README | 5 + images/icons/Thumbs.db | Bin 0 -> 27136 bytes images/icons/callouts/1.png | Bin 0 -> 329 bytes images/icons/callouts/10.png | Bin 0 -> 361 bytes images/icons/callouts/11.png | Bin 0 -> 565 bytes images/icons/callouts/12.png | Bin 0 -> 617 bytes images/icons/callouts/13.png | Bin 0 -> 623 bytes images/icons/callouts/14.png | Bin 0 -> 411 bytes images/icons/callouts/15.png | Bin 0 -> 640 bytes images/icons/callouts/2.png | Bin 0 -> 353 bytes images/icons/callouts/3.png | Bin 0 -> 350 bytes images/icons/callouts/4.png | Bin 0 -> 345 bytes images/icons/callouts/5.png | Bin 0 -> 348 bytes images/icons/callouts/6.png | Bin 0 -> 355 bytes images/icons/callouts/7.png | Bin 0 -> 344 bytes images/icons/callouts/8.png | Bin 0 -> 357 bytes images/icons/callouts/9.png | Bin 0 -> 357 bytes images/icons/callouts/Thumbs.db | Bin 0 -> 16896 bytes images/icons/caution.png | Bin 0 -> 2734 bytes images/icons/example.png | Bin 0 -> 2599 bytes images/icons/home.png | Bin 0 -> 1340 bytes images/icons/important.png | Bin 0 -> 2980 bytes images/icons/next.png | Bin 0 -> 1302 bytes images/icons/note.png | Bin 0 -> 2494 bytes images/icons/prev.png | Bin 0 -> 1348 bytes images/icons/tip.png | Bin 0 -> 2718 bytes images/icons/up.png | Bin 0 -> 1320 bytes images/icons/warning.png | Bin 0 -> 3214 bytes images/ip_allocation.png | Bin 0 -> 17687 bytes images/loopback_right.png | Bin 0 -> 96274 bytes 
images/loopback_wrong.png | Bin 0 -> 97046 bytes images/sfr_profile.png | Bin 0 -> 16776 bytes images/small.jpg | Bin 0 -> 6943 bytes images/smallnew.png | Bin 0 -> 292 bytes images/trex2.png | Bin 0 -> 59226 bytes images/trex_algo.png | Bin 0 -> 24137 bytes images/trex_auto_script.jpg | Bin 0 -> 201947 bytes images/trex_control_plane_modules.png | Bin 0 -> 455128 bytes images/trex_desing.png | Bin 0 -> 17994 bytes images/trex_generator_1.PNG | Bin 0 -> 24213 bytes images/trex_logo.png | Bin 0 -> 56958 bytes images/trex_logo_64_64.png | Bin 0 -> 27300 bytes images/trex_logo_green_small.png | Bin 0 -> 55947 bytes images/trex_model.png | Bin 0 -> 98328 bytes images/trex_motinor_config.png | Bin 0 -> 21573 bytes images/trex_motinor_view.png | Bin 0 -> 168606 bytes images/trex_sfr_profile.png | Bin 0 -> 41114 bytes images/trex_vm_bios_err.png | Bin 0 -> 151827 bytes images/trex_vm_login.png | Bin 0 -> 49907 bytes images/trex_vm_run.png | Bin 0 -> 19953 bytes images/ucs200_2.png | Bin 0 -> 23528 bytes images/vm_import.png | Bin 0 -> 105388 bytes images/vm_selection_screen.png | Bin 0 -> 66700 bytes my_chart.js | 84 + release_notes.asciidoc | 25 + symbols.lang | 5 + trex_book-docinfo.html | 22 + trex_book.asciidoc | 1232 +++++++++++ trex_book_basic.asciidoc | 3308 +++++++++++++++++++++++++++++ trex_config.asciidoc | 299 +++ trex_control_plane_design_phase1.asciidoc | 516 +++++ trex_control_plane_peek.asciidoc | 225 ++ trex_preso.asciidoc | 1312 ++++++++++++ trex_vm_manual.asciidoc | 324 +++ waf | Bin 0 -> 90909 bytes waf.css | 39 + wscript | 180 ++ 77 files changed, 7582 insertions(+) create mode 100755 b create mode 100755 images/40425209_l.jpg create mode 100755 images/Intel520.png create mode 100755 images/T-Rex_vm.png create mode 100755 images/Thumbs.db create mode 100755 images/TrexConfig.png create mode 100755 images/TrexConfig_switch.png create mode 100755 images/TrexViewer.png create mode 100755 images/cisco.png create mode 100755 images/dns_wireshark.png create 
mode 100755 images/icons/README create mode 100755 images/icons/Thumbs.db create mode 100755 images/icons/callouts/1.png create mode 100755 images/icons/callouts/10.png create mode 100755 images/icons/callouts/11.png create mode 100755 images/icons/callouts/12.png create mode 100755 images/icons/callouts/13.png create mode 100755 images/icons/callouts/14.png create mode 100755 images/icons/callouts/15.png create mode 100755 images/icons/callouts/2.png create mode 100755 images/icons/callouts/3.png create mode 100755 images/icons/callouts/4.png create mode 100755 images/icons/callouts/5.png create mode 100755 images/icons/callouts/6.png create mode 100755 images/icons/callouts/7.png create mode 100755 images/icons/callouts/8.png create mode 100755 images/icons/callouts/9.png create mode 100755 images/icons/callouts/Thumbs.db create mode 100755 images/icons/caution.png create mode 100755 images/icons/example.png create mode 100755 images/icons/home.png create mode 100755 images/icons/important.png create mode 100755 images/icons/next.png create mode 100755 images/icons/note.png create mode 100755 images/icons/prev.png create mode 100755 images/icons/tip.png create mode 100755 images/icons/up.png create mode 100755 images/icons/warning.png create mode 100755 images/ip_allocation.png create mode 100755 images/loopback_right.png create mode 100755 images/loopback_wrong.png create mode 100755 images/sfr_profile.png create mode 100755 images/small.jpg create mode 100755 images/smallnew.png create mode 100755 images/trex2.png create mode 100755 images/trex_algo.png create mode 100755 images/trex_auto_script.jpg create mode 100755 images/trex_control_plane_modules.png create mode 100755 images/trex_desing.png create mode 100755 images/trex_generator_1.PNG create mode 100755 images/trex_logo.png create mode 100755 images/trex_logo_64_64.png create mode 100755 images/trex_logo_green_small.png create mode 100755 images/trex_model.png create mode 100755 
images/trex_motinor_config.png create mode 100755 images/trex_motinor_view.png create mode 100755 images/trex_sfr_profile.png create mode 100755 images/trex_vm_bios_err.png create mode 100755 images/trex_vm_login.png create mode 100755 images/trex_vm_run.png create mode 100755 images/ucs200_2.png create mode 100755 images/vm_import.png create mode 100755 images/vm_selection_screen.png create mode 100755 my_chart.js create mode 100755 release_notes.asciidoc create mode 100755 symbols.lang create mode 100755 trex_book-docinfo.html create mode 100755 trex_book.asciidoc create mode 100755 trex_book_basic.asciidoc create mode 100755 trex_config.asciidoc create mode 100755 trex_control_plane_design_phase1.asciidoc create mode 100755 trex_control_plane_peek.asciidoc create mode 100755 trex_preso.asciidoc create mode 100755 trex_vm_manual.asciidoc create mode 100755 waf create mode 100755 waf.css create mode 100755 wscript diff --git a/b b/b new file mode 100755 index 00000000..52dfa76f --- /dev/null +++ b/b @@ -0,0 +1,6 @@ +#! /bin/bash +python waf $@ +sts=$? 
+exit $sts + + diff --git a/images/40425209_l.jpg b/images/40425209_l.jpg new file mode 100755 index 00000000..60bb3430 Binary files /dev/null and b/images/40425209_l.jpg differ diff --git a/images/Intel520.png b/images/Intel520.png new file mode 100755 index 00000000..ad67f3ec Binary files /dev/null and b/images/Intel520.png differ diff --git a/images/T-Rex_vm.png b/images/T-Rex_vm.png new file mode 100755 index 00000000..53340b9a Binary files /dev/null and b/images/T-Rex_vm.png differ diff --git a/images/Thumbs.db b/images/Thumbs.db new file mode 100755 index 00000000..b775040b Binary files /dev/null and b/images/Thumbs.db differ diff --git a/images/TrexConfig.png b/images/TrexConfig.png new file mode 100755 index 00000000..1b956276 Binary files /dev/null and b/images/TrexConfig.png differ diff --git a/images/TrexConfig_switch.png b/images/TrexConfig_switch.png new file mode 100755 index 00000000..245a849b Binary files /dev/null and b/images/TrexConfig_switch.png differ diff --git a/images/TrexViewer.png b/images/TrexViewer.png new file mode 100755 index 00000000..ad76dcc1 Binary files /dev/null and b/images/TrexViewer.png differ diff --git a/images/cisco.png b/images/cisco.png new file mode 100755 index 00000000..1a2b826c Binary files /dev/null and b/images/cisco.png differ diff --git a/images/dns_wireshark.png b/images/dns_wireshark.png new file mode 100755 index 00000000..2d8010fc Binary files /dev/null and b/images/dns_wireshark.png differ diff --git a/images/icons/README b/images/icons/README new file mode 100755 index 00000000..f12b2a73 --- /dev/null +++ b/images/icons/README @@ -0,0 +1,5 @@ +Replaced the plain DocBook XSL admonition icons with Jimmac's DocBook +icons (http://jimmac.musichall.cz/ikony.php3). I dropped transparency +from the Jimmac icons to get round MS IE and FOP PNG incompatibilies. 
+ +Stuart Rackham diff --git a/images/icons/Thumbs.db b/images/icons/Thumbs.db new file mode 100755 index 00000000..97d755f2 Binary files /dev/null and b/images/icons/Thumbs.db differ diff --git a/images/icons/callouts/1.png b/images/icons/callouts/1.png new file mode 100755 index 00000000..7d473430 Binary files /dev/null and b/images/icons/callouts/1.png differ diff --git a/images/icons/callouts/10.png b/images/icons/callouts/10.png new file mode 100755 index 00000000..997bbc82 Binary files /dev/null and b/images/icons/callouts/10.png differ diff --git a/images/icons/callouts/11.png b/images/icons/callouts/11.png new file mode 100755 index 00000000..ce47dac3 Binary files /dev/null and b/images/icons/callouts/11.png differ diff --git a/images/icons/callouts/12.png b/images/icons/callouts/12.png new file mode 100755 index 00000000..31daf4e2 Binary files /dev/null and b/images/icons/callouts/12.png differ diff --git a/images/icons/callouts/13.png b/images/icons/callouts/13.png new file mode 100755 index 00000000..14021a89 Binary files /dev/null and b/images/icons/callouts/13.png differ diff --git a/images/icons/callouts/14.png b/images/icons/callouts/14.png new file mode 100755 index 00000000..64014b75 Binary files /dev/null and b/images/icons/callouts/14.png differ diff --git a/images/icons/callouts/15.png b/images/icons/callouts/15.png new file mode 100755 index 00000000..0d65765f Binary files /dev/null and b/images/icons/callouts/15.png differ diff --git a/images/icons/callouts/2.png b/images/icons/callouts/2.png new file mode 100755 index 00000000..5d09341b Binary files /dev/null and b/images/icons/callouts/2.png differ diff --git a/images/icons/callouts/3.png b/images/icons/callouts/3.png new file mode 100755 index 00000000..ef7b7004 Binary files /dev/null and b/images/icons/callouts/3.png differ diff --git a/images/icons/callouts/4.png b/images/icons/callouts/4.png new file mode 100755 index 00000000..adb8364e Binary files /dev/null and 
b/images/icons/callouts/4.png differ diff --git a/images/icons/callouts/5.png b/images/icons/callouts/5.png new file mode 100755 index 00000000..4d7eb460 Binary files /dev/null and b/images/icons/callouts/5.png differ diff --git a/images/icons/callouts/6.png b/images/icons/callouts/6.png new file mode 100755 index 00000000..0ba694af Binary files /dev/null and b/images/icons/callouts/6.png differ diff --git a/images/icons/callouts/7.png b/images/icons/callouts/7.png new file mode 100755 index 00000000..472e96f8 Binary files /dev/null and b/images/icons/callouts/7.png differ diff --git a/images/icons/callouts/8.png b/images/icons/callouts/8.png new file mode 100755 index 00000000..5e60973c Binary files /dev/null and b/images/icons/callouts/8.png differ diff --git a/images/icons/callouts/9.png b/images/icons/callouts/9.png new file mode 100755 index 00000000..a0676d26 Binary files /dev/null and b/images/icons/callouts/9.png differ diff --git a/images/icons/callouts/Thumbs.db b/images/icons/callouts/Thumbs.db new file mode 100755 index 00000000..2312a5bb Binary files /dev/null and b/images/icons/callouts/Thumbs.db differ diff --git a/images/icons/caution.png b/images/icons/caution.png new file mode 100755 index 00000000..9a8c515a Binary files /dev/null and b/images/icons/caution.png differ diff --git a/images/icons/example.png b/images/icons/example.png new file mode 100755 index 00000000..1199e864 Binary files /dev/null and b/images/icons/example.png differ diff --git a/images/icons/home.png b/images/icons/home.png new file mode 100755 index 00000000..37a5231b Binary files /dev/null and b/images/icons/home.png differ diff --git a/images/icons/important.png b/images/icons/important.png new file mode 100755 index 00000000..be685cc4 Binary files /dev/null and b/images/icons/important.png differ diff --git a/images/icons/next.png b/images/icons/next.png new file mode 100755 index 00000000..64e126bd Binary files /dev/null and b/images/icons/next.png differ diff --git 
a/images/icons/note.png b/images/icons/note.png new file mode 100755 index 00000000..7c1f3e2f Binary files /dev/null and b/images/icons/note.png differ diff --git a/images/icons/prev.png b/images/icons/prev.png new file mode 100755 index 00000000..3e8f12fe Binary files /dev/null and b/images/icons/prev.png differ diff --git a/images/icons/tip.png b/images/icons/tip.png new file mode 100755 index 00000000..f087c73b Binary files /dev/null and b/images/icons/tip.png differ diff --git a/images/icons/up.png b/images/icons/up.png new file mode 100755 index 00000000..2db1ce62 Binary files /dev/null and b/images/icons/up.png differ diff --git a/images/icons/warning.png b/images/icons/warning.png new file mode 100755 index 00000000..d41edb9a Binary files /dev/null and b/images/icons/warning.png differ diff --git a/images/ip_allocation.png b/images/ip_allocation.png new file mode 100755 index 00000000..023706ef Binary files /dev/null and b/images/ip_allocation.png differ diff --git a/images/loopback_right.png b/images/loopback_right.png new file mode 100755 index 00000000..1891d25a Binary files /dev/null and b/images/loopback_right.png differ diff --git a/images/loopback_wrong.png b/images/loopback_wrong.png new file mode 100755 index 00000000..92602a86 Binary files /dev/null and b/images/loopback_wrong.png differ diff --git a/images/sfr_profile.png b/images/sfr_profile.png new file mode 100755 index 00000000..da9e5e62 Binary files /dev/null and b/images/sfr_profile.png differ diff --git a/images/small.jpg b/images/small.jpg new file mode 100755 index 00000000..1557f126 Binary files /dev/null and b/images/small.jpg differ diff --git a/images/smallnew.png b/images/smallnew.png new file mode 100755 index 00000000..411c2e17 Binary files /dev/null and b/images/smallnew.png differ diff --git a/images/trex2.png b/images/trex2.png new file mode 100755 index 00000000..2bed6ab3 Binary files /dev/null and b/images/trex2.png differ diff --git a/images/trex_algo.png 
b/images/trex_algo.png new file mode 100755 index 00000000..6da98473 Binary files /dev/null and b/images/trex_algo.png differ diff --git a/images/trex_auto_script.jpg b/images/trex_auto_script.jpg new file mode 100755 index 00000000..f4cdb109 Binary files /dev/null and b/images/trex_auto_script.jpg differ diff --git a/images/trex_control_plane_modules.png b/images/trex_control_plane_modules.png new file mode 100755 index 00000000..c4978e38 Binary files /dev/null and b/images/trex_control_plane_modules.png differ diff --git a/images/trex_desing.png b/images/trex_desing.png new file mode 100755 index 00000000..64b3ed1c Binary files /dev/null and b/images/trex_desing.png differ diff --git a/images/trex_generator_1.PNG b/images/trex_generator_1.PNG new file mode 100755 index 00000000..8b0d57a8 Binary files /dev/null and b/images/trex_generator_1.PNG differ diff --git a/images/trex_logo.png b/images/trex_logo.png new file mode 100755 index 00000000..3bf29278 Binary files /dev/null and b/images/trex_logo.png differ diff --git a/images/trex_logo_64_64.png b/images/trex_logo_64_64.png new file mode 100755 index 00000000..286228b6 Binary files /dev/null and b/images/trex_logo_64_64.png differ diff --git a/images/trex_logo_green_small.png b/images/trex_logo_green_small.png new file mode 100755 index 00000000..0cc7221a Binary files /dev/null and b/images/trex_logo_green_small.png differ diff --git a/images/trex_model.png b/images/trex_model.png new file mode 100755 index 00000000..1bdcfca4 Binary files /dev/null and b/images/trex_model.png differ diff --git a/images/trex_motinor_config.png b/images/trex_motinor_config.png new file mode 100755 index 00000000..70f705ce Binary files /dev/null and b/images/trex_motinor_config.png differ diff --git a/images/trex_motinor_view.png b/images/trex_motinor_view.png new file mode 100755 index 00000000..855dc027 Binary files /dev/null and b/images/trex_motinor_view.png differ diff --git a/images/trex_sfr_profile.png 
b/images/trex_sfr_profile.png new file mode 100755 index 00000000..f90caa42 Binary files /dev/null and b/images/trex_sfr_profile.png differ diff --git a/images/trex_vm_bios_err.png b/images/trex_vm_bios_err.png new file mode 100755 index 00000000..3ac2da5d Binary files /dev/null and b/images/trex_vm_bios_err.png differ diff --git a/images/trex_vm_login.png b/images/trex_vm_login.png new file mode 100755 index 00000000..388ee4ba Binary files /dev/null and b/images/trex_vm_login.png differ diff --git a/images/trex_vm_run.png b/images/trex_vm_run.png new file mode 100755 index 00000000..86d1cf19 Binary files /dev/null and b/images/trex_vm_run.png differ diff --git a/images/ucs200_2.png b/images/ucs200_2.png new file mode 100755 index 00000000..52cb10f4 Binary files /dev/null and b/images/ucs200_2.png differ diff --git a/images/vm_import.png b/images/vm_import.png new file mode 100755 index 00000000..e3b28647 Binary files /dev/null and b/images/vm_import.png differ diff --git a/images/vm_selection_screen.png b/images/vm_selection_screen.png new file mode 100755 index 00000000..a2c8e19b Binary files /dev/null and b/images/vm_selection_screen.png differ diff --git a/my_chart.js b/my_chart.js new file mode 100755 index 00000000..41851e0f --- /dev/null +++ b/my_chart.js @@ -0,0 +1,84 @@ +function chart(id,data,data_names,xlabel,ylabel){ + +var margin = {_myt: 20, _right: 20, _bottom: 30, _left: 40}; + +var width = 960 - margin._left - margin._right; +var height = 500 - margin._myt - margin._bottom; + +var x = d3.scale.linear() + .range([0, width]); + +var y = d3.scale.linear() + .range([height, 0]); + +var color = d3.scale.category10(); + +var xAxis = d3.svg.axis() + .scale(x) + .orient("bottom"); + +var yAxis = d3.svg.axis() + .scale(y) + .orient("left"); + +var svg = d3.select(id).append("svg") + .attr("width", width + margin._left + margin._right) + .attr("height", height + margin._myt + margin._bottom) + .append("g") + .attr("transform", "translate(" + margin._left + 
"," + margin._myt + ")"); + +x.domain(d3.extent(data, function(d) { return d[0] })).nice(); +y.domain(d3.extent(data, function(d) { return d[1] })).nice(); + +svg.append("g") + .attr("class", "x axis") + .attr("transform", "translate(0," + height + ")") + .call(xAxis) + .append("text") + .attr("class", "label") + .attr("x", width) + .attr("y", -6) + .style("text-anchor", "end") + .text(xlabel); + +svg.append("g") + .attr("class", "y axis") + .call(yAxis) + .append("text") + .attr("class", "label") + .attr("transform", "rotate(-90)") + .attr("y", 6) + .attr("dy", ".71em") + .style("text-anchor", "end") + .text(ylabel) + +svg.selectAll(".dot") + .data(data) + .enter().append("circle") + .attr("class", "dot") + .attr("r", 3.5) + .attr("cx", function(d) { return x(d[0]); }) + .attr("cy", function(d) { return y(d[1]); }) + .style("fill", function(d) { return color(d[2]); }); + +var legend = svg.selectAll(".legend") + .data(color.domain()) + .enter().append("g") + .attr("class", "legend") + .attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; }); + +legend.append("rect") + .attr("x", width - 18) + .attr("width", 18) + .attr("height", 18) + .style("fill", color); + +legend.append("text") + .attr("x", width - 24) + .attr("y", 9) + .attr("dy", ".35em") + .style("text-anchor", "end") + .text(function(d) { return data_names[d]; }); + +} + diff --git a/release_notes.asciidoc b/release_notes.asciidoc new file mode 100755 index 00000000..4300ba06 --- /dev/null +++ b/release_notes.asciidoc @@ -0,0 +1,25 @@ +:author: hhaim +:email: + + +ifndef::backend-docbook[] +++++++++++++++ +
+ Smiley face +
+









+++++++++++++++ + +== TRex release notes == +endif::backend-docbook[] + +ifdef::backend-docbook[] + +== TRex release notes == +:numbered: + +endif::backend-docbook[] + +== Release 1.72 == + + diff --git a/symbols.lang b/symbols.lang new file mode 100755 index 00000000..38ac4e1c --- /dev/null +++ b/symbols.lang @@ -0,0 +1,5 @@ +co_symbol = "<1>","<2>","<3>","<4>","<5>","<6>","<7>","<8>", + "<9>","<10>","<11>","<12>","<13>","<14>","<15>" +symbol = "~","!","%","^","*","(",")","-","+","=","[", + "]","\\",":",";",",",".","/","?","&","<",">","\|" + diff --git a/trex_book-docinfo.html b/trex_book-docinfo.html new file mode 100755 index 00000000..a444f506 --- /dev/null +++ b/trex_book-docinfo.html @@ -0,0 +1,22 @@ + + + + + + + + + + + diff --git a/trex_book.asciidoc b/trex_book.asciidoc new file mode 100755 index 00000000..5e381356 --- /dev/null +++ b/trex_book.asciidoc @@ -0,0 +1,1232 @@ +TRex +==== +:author: hhaim +:email: +:revnumber: 1.70-0.0 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex + + + + +== Introduction + +=== A word on traffic generators + +Traditionally, routers have been tested using commercial traffic generators, while performance +typically has been measured using packets per second (PPS) metrics. As router functionality and +services have become more complex, stateful traffic generators have become necessary to +provide more realistic application traffic scenarios. 
+The advantages of realistic traffic generators are: + +* Providing more accurate performance numbers +* Finding real bottlenecks + +==== Current Challenges: + +* *Cost* : Commercial State-full traffic generators are expensive +* *Scale* : Bandwidth does not scale up well with features complexity +* *Standardization* : Lack of standardization of traffic patterns and methodologies +* *Flexibility* : Commercial tools do not allow agility when flexibility and changes are needed + +==== Implications + +* High capital expenditure (capex) spent by different teams +* Testing in low scale and extrapolation became a common practice, it is not accurate, and hides real life bottlenecks and quality issues +* Different feature / platform teams benchmark and results methodology +* Delays in development and testing due to testing tools features dependency +* Resource and effort investment in developing different ad hoc tools and test methodologies + +=== Overview of TRex + +TRex addresses these problems through an innovative and extendable software implementation and by leveraging standard and open SW and x86/UCS HW. + +* Generates and analyzes L4-7 traffic and able to provide in one tool capabilities provided by commercial L7 tools. +* Stateful traffic generator based on pre-processing and smart replay of real traffic templates. +* Generates and *amplifies* both client and server side traffic. +* Customized functionality can be added. 
+* Scale to 200Gb/sec for one UCS ( using Intel 40Gb/sec NICS) +* Low cost +* Virtual interfaces support, enable TRex to be used in a fully virtual environment without physical NICs and the following example use cases: +** Amazon AWS +** Cisco LaaS +** TRex on your laptop +** Self-contained packaging that can be easily installed and deployed + + +.TRex Hardware +[options="header",cols="1^,1^"] +|================= +|Cisco UCS Platform | Intel NIC +| image:images/ucs200_2.png[title="generator"] | image:images/Intel520.png[title="generator"] +|================= + +=== Purpose of this guide + +This guide explains the use of TRex internals and the use of TRex in conjunction with Cisco ASR1000 Series routers. The examples illustrate novel traffic generation techniques made possible by TRex. + +== Download and installation + +=== Hardware recommendation + +TRex operates in a Linux application environment, interacting with Linux kernel modules. +TRex curretly works on x86 architecture and can operates well on Cisco UCS hardware. The following platforms have been tested and are recommended for operating TRex. + +[NOTE] +===================================== + A high-end UCS platform is not required for operating TRex in its current version, but may be required for future versions. +===================================== + + +.Preferred UCS +[options="header",cols="1,3"] +|================= +| UCS Type | Comments +| UCS C220 M3/M4 | Supports up to 40Gb/sec with 540-D2 and with newer Intel NIC 80Gb/sec with 1RU, recommended +| UCS C200| Early UCS model +| UCS C210 M2 | Supports up to 40Gb/sec PCIe3.0 +| UCS C240 M3 | Supports up to 200Gb/sec using Intel XL710 NICS +| UCS C260M2 | Supports up to 30Gb/sec due to V2 PCIe. 
+|================= + +.Internal Components +[options="header",cols="1,2",width="60%"] +|================= +| Components | Details +| CPU | 2x CPU E5-2620 +| CPU Configuration | 2-Socket CPU configurations (can also work with one CPU) +| Memory | 2x4 banks for each CPU. Total of 8 BANKS ==> 32GB +| NO RAID | NO RAID +|================= + +.Intel NICS supported +[options="header",cols="1,1,2",width="50%"] +|================= +| Bandwidth | Chipset | Example +| 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC +| 10Gb/sec | Intel 82599 | Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter +| 40Gb/sec | Intel XL710 | QSFP+ +| VMXNET | VMware paravirtualize | connect using vmWare vSwitch +| E1000 | paravirtualize | vmWare/KVM/VirtualBox +|================= + +IMPORTANT: Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. + +.Sample order for UCSC-C220-M3S with 4x10Gb ports +[options="header",cols="2,1^",width="50%"] +|================= +| Component | Amount +| UCSC-C220-M3S | 1 +| UCS-CPU-E5-2650 | 2 +| UCS-MR-1X041RY-A | 8 +| A03-D500GC3 | 1 +| N2XX-AIPCI01 | 2 +| UCSC-PSU-650W | 1 +| SFS-250V-10A-IS | 1 +| UCSC-CMA1 | 1 +| UCSC-HS-C220M3 | 2 +| N20-BBLKD | 7 +| UCSC-PSU-BLKP | 1 +| UCSC-RAIL1 | 1 +|======================== + +NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex ( but not for plain Linux driver ). + +=== Install OS + +==== Supported versions + +Fedora 18, Ubuntu 14.04.1 LTS and Fedora 20 are the Linux OS supported. +More OS could be supported by compiling the drivers. + +==== Download ISO file + +Download the ISO from Fedora web site from link:http://archive.fedoraproject.org/pub/fedora/linux/releases/18/Fedora/x86_64/iso/[here]. 
+ +Verify the checksum with the following command: + +[source,bash] +---- +$sha256sum Fedora-18-x86_64-DVD.iso +91c5f0aca391acf76a047e284144f90d66d3d5f5dcd26b01f368a43236832c03 #<1> +---- +<1> Should be equal to this number. + +==== Install Linux + +Ask your lab admin to install the Linux using CIMC, assign an IP, and set the DNS. Request the sudo or super user password to enable you to ping and SSH. + +IMPORTANT: To use TRex, you should have sudo on this machine or root password. +WARNING: Upgrading the linux Kernel using `yum upgrade` require to build the TRex drivers. + +==== Verify Intel NIC installation + +The following is an example of 4x10Gb/sec TRex with I350 management port and four x520-D2 (82599 chipset): + +[source,bash] +---- +$[root@trex]lspci | grep Network +01:00.0 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01) #<1> +01:00.1 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01) #<2> +03:00.0 Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01) #<3> +03:00.1 Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01) +82:00.0 Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01) +82:00.1 Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01) +---- +<1> Management port +<2> CIMC port +<3> 10Gb/sec traffic ports ( Intel 82599EB) + +=== Obtaining the TRex package + +Connect by ssh to the TRex machine and do the following: + +assuming *$WEB_URL* is *{web_server_url}* + +[source,bash] +---- +$mkdir trex +$cd trex +$wget --no-cache $WEB_URL/release/latest +$tar -xzvf latest +---- + + +to take the bleeding edge version +[source,bash] +---- +$wget --no-cache $WEB_URL/release/be_latest +---- + +To obtain a specific version, do the following: +[source,bash] +---- +$wget --no-cache $WEB_URL/release/vX.XX.tar.gz #<1> +---- + +=== Running TRex for 
the first time in loopback + +If you have 10Gb/sec TRex (based on Intel 520-D2 NICs) you can verify that it works correctly by loopback the ports. +You can install Intel SFP+ or Cisco SFP+, but you cannot connect ports that are on the same NIC to each other (it might not sync). +If you have only one NIC of 10gb/sec you cannot perform this test beacause the ports will not have valid link. +Another option for loopback is to use Cisco twinax copper cable see link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[here] + +//TBD: perhaps rephase, using a "Prerequisites" or "Required" heading. The requirement here would be: Two (2) 10gb/sec NICs +//[hh] it is not accurate beacuse with 1Gb/sec you can have this test + +.Correct loopback +image:images/loopback_right.png[title="rigt"] + +.Wrong loopback +image:images/loopback_wrong.png[title="rigt"] + +In case you have 1Gb/Sec Intel NIC (I350) you can do anything you like from the loopback perspective *but* you must filter the management port before see xref:trex_config[here]. 
+ +==== Identify the ports + +[source,bash] +---- + $>sudo ./dpdk_setup_ports.py --s + + Network devices using DPDK-compatible driver + ============================================ + + Network devices using kernel driver + =================================== + 0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active* + 0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<1> + 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<2> + 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<3> + 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<4> + + + Other network devices + ===================== + +---- +<1> TRex interface #1 before unbinding +<2> TRex interface #2 before unbinding +<3> TRex interface #3 before unbinding +<4> TRex interface #4 before unbinding + +Now choose the port you want to use and follow the next section by creating a configuration file. + +==== Create minimum configuration file + +Create a configuration file in `/etc/trex_cfg.yaml`. + +You could copy a basic configuration file from cfg folder by running this command. 
+ +[source,bash] +---- +$cp cfg/vm1.yaml /etc/trex_cfg.yaml +---- + +Now edit the configuration file with the right values from the previous section + +[source,bash] +---- + +- port_limit : 4 #<1> + version : 2 #<2> + interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] #<3> +---- +<1> the number of ports +<2> must add version 2 to the configuration file +<3> The list of interface from `#>sudo ./dpdk_setup_ports.py -s`, in this example it was taken + +When working with VM, you must set the destination mac of one port as the source or the other for loopback the port in the vSwitch +and you should take the right value from the hypervisor (in case of a physical NIC you can set the mac-address with virtual you can't and you should take it from the hypervisor) +and example + +[source,python] +---- + - port_limit : 2 + version : 2 + interfaces : ["03:00.0","03:00.1"] + port_info : # set eh mac addr + - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 + src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] <1> + - dest_mac : [0x2,0x0,0x0,0x2,0x0,0x00] # port 1 <1> + src_mac : [0x1,0x0,0x0,0x1,0x0,0x00] +---- +<1> source mac is like destination mac (this should be set or taken from vmware). the mac was taken from hypervisor + +==== Running TRex + +Run this for 4x10Gb/sec TRex: +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/dns.yaml -c 4 -m 1 -d 100 -l 1000 +---- + +NOTE: For 10Gb/sec TRex with 2,6, or 8 ports, add --limit-ports [number of ports] *or* follow xref:trex_config[this] to configure the TRex. +//TBD: recommend bold for the 2 commands. + +If successful, the output will be similar to the following: + +[source,python] +---- +$ sudo ./t-rex-64 -f cap2/dns.yaml -d 100 -l 1000 +Starting T-Rex 1.50 please wait ... 
+zmq publisher at: tcp://*:4500 + number of ports founded : 4 + port : 0 + ------------ + link : link : Link Up - speed 10000 Mbps - full-duplex <1> + promiscuous : 0 + port : 1 + ------------ + link : link : Link Up - speed 10000 Mbps - full-duplex + promiscuous : 0 + port : 2 + ------------ + link : link : Link Up - speed 10000 Mbps - full-duplex + promiscuous : 0 + port : 3 + ------------ + link : link : Link Up - speed 10000 Mbps - full-duplex + promiscuous : 0 + + + -Per port stats table + ports | 0 | 1 | 2 | 3 + ------------------------------------------------------------------------------------- + opackets | 1003 | 1003 | 1002 | 1002 + obytes | 66213 | 66229 | 66132 | 66132 + ipackets | 1003 | 1003 | 1002 | 1002 + ibytes | 66225 | 66209 | 66132 | 66132 + ierrors | 0 | 0 | 0 | 0 + oerrors | 0 | 0 | 0 | 0 + Tx Bw | 217.09 Kbps | 217.14 Kbps | 216.83 Kbps | 216.83 Kbps + + -Global stats enabled + Cpu Utilization : 0.0 % <12> 29.7 Gb/core <13> + Platform_factor : 1.0 + Total-Tx : 867.89 Kbps <2> + Total-Rx : 867.86 Kbps <3> + Total-PPS : 1.64 Kpps + Total-CPS : 0.50 cps + + Expected-PPS : 2.00 pps <9> + Expected-CPS : 1.00 cps <10> + Expected-BPS : 1.36 Kbps <11> + + Active-flows : 0 <6> Clients : 510 Socket-util : 0.0000 % + Open-flows : 1 <7> Servers : 254 Socket : 1 Socket/Clients : 0.0 + drop-rate : 0.00 bps <8> + current time : 5.3 sec + test duration : 94.7 sec + + -Latency stats enabled + Cpu Utilization : 0.2 % <14> + if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window + | , , check, , latency(usec),latency (usec) ,(usec) , + -------------------------------------------------------------------------------------------------- + 0 | 1002, 1002, 0, 0, 51 , 69, 0 | 0 69 67 <4> + 1 | 1002, 1002, 0, 0, 53 , 196, 0 | 0 196 53 <5> + 2 | 1002, 1002, 0, 0, 54 , 71, 0 | 0 71 69 + 3 | 1002, 1002, 0, 0, 53 , 193, 0 | 0 193 52 +---- +<1> Link must be up for TRex to work. 
+<2> Total Rx must be the same as Tx +<3> Total Rx must be the same as Tx +<4> Tx_ok == Rx_ok +<5> Tx_ok == Rx_ok +<6> Number of TRex active "flows". Could be different than the Router flows due to aging issues. Usually the TRex number of active flows is much lower than the router's. +<7> Number of TRex flows from startup. +<8> Drop rate. +<9> Expected Packet Per Second (without the latency packets). +<10> Expected Connection Per Second (without the latency packets). +<11> Expected Bit Per Second (without the latency packets). +<12> Average CPU utilization of transmitters threads. For best results it should be lower than 80%. +<13> Gb/sec generated per core of DP. Higher is better. +<14> Rx and latency thread CPU utilization. + +WARNING: If you don't see Rx packets, revisit your MAC-address configuration. + +==== Running TRex for the first time with router + +You can follow this presentation link:trex_config_guide.html[first time TRex configuration] +//TBD: Note that the link does not work correctly in PDF rendition +or continue reading. +Without a config file, TRex sets the source MAC of all ports to `00:00:00:01:00:00` and expects to receive packets with the destination MAC-address `00:00:00:01:00:00`, +so you just need to configure the router with the right MAC address. + +NOTE: Virtual routers on ESXi (for example, Cisco CSR1000v) must have a distinct MAC address for each port. Specify the address in the configuration file. See more xref:trex_config[here]. Another example is where the TRex is connected to a switch. In that case each TRex port should have a distinct MAC address. + +include::trex_book_basic.asciidoc[] + +== Advanced features + + +=== VLAN Trunk support anchor:trex_valn[] + +The VLAN Trunk TRex feature attempts to solve the router port bandwidth limitation when the traffic profile is asymmetric. Example: the SFR profile is asymmetric and was the first use case. +This feature converts asymmetric traffic to symmetric, from the port perspective, using router sub-interfaces.
+This feature requires TRex to send the traffic on two VLANs. The following describes how this works. + +.YAML format +[source,python] +---- + vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } +---- + + +.Example +[source,python] +---- +- duration : 0.1 + vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } <1> +---- +<1> enable VLAN feature , valn0==100 , valn1==200 + +*Problem definition:*:: + +Assuming a TRex with two ports and an SFR traffic profile. + +.Without VLAN/sub interfaces +[source,python] +---- +0 ( client) -> [ ] - 1 ( server) +---- +Without VLAN support it is not symmetric. From port 0 (client side), it sends 10%, from and port 1 (server) sends 90%. Port 1 become the bottlneck (10Gb/s limit) before port 0 + +.With VLAN/sub interfaces +[source,python] +---- +port 0 ( client VLAN0) <-> | | <-> port 1 ( server-VLAN0) +port 0 ( server VLAN1) <-> | | <-> port 1 ( client-VLAN1) +---- + +In this case both ports will have the same amount of traffic. + +*Router configuation:*:: +[source,python] +---- + ! + interface TenGigabitEthernet1/0/0 <1> + mac-address 0000.0001.0000 + mtu 4000 + no ip address + load-interval 30 + ! + i + interface TenGigabitEthernet1/0/0.100 + encapsulation dot1Q 100 <2> + ip address 11.77.11.1 255.255.255.0 + ip nbar protocol-discovery + ip policy route-map vlan_100_p1_to_p2 <3> + ! + interface TenGigabitEthernet1/0/0.200 + encapsulation dot1Q 200 <4> + ip address 11.88.11.1 255.255.255.0 + ip nbar protocol-discovery + ip policy route-map vlan_200_p1_to_p2 <5> + ! + interface TenGigabitEthernet1/1/0 + mac-address 0000.0001.0000 + mtu 4000 + no ip address + load-interval 30 + ! + interface TenGigabitEthernet1/1/0.100 + encapsulation dot1Q 100 + ip address 22.77.11.1 255.255.255.0 + ip nbar protocol-discovery + ip policy route-map vlan_100_p2_to_p1 + ! + interface TenGigabitEthernet1/1/0.200 + encapsulation dot1Q 200 + ip address 22.88.11.1 255.255.255.0 + ip nbar protocol-discovery + ip policy route-map vlan_200_p2_to_p1 + ! 
+ + arp 11.77.11.12 0000.0001.0000 ARPA <6> + arp 22.77.11.12 0000.0001.0000 ARPA + + route-map vlan_100_p1_to_p2 permit 10 <7> + set ip next-hop 22.77.11.12 + ! + route-map vlan_100_p2_to_p1 permit 10 + set ip next-hop 11.77.11.12 + ! + + route-map vlan_200_p1_to_p2 permit 10 + set ip next-hop 22.88.11.12 + ! + route-map vlan_200_p2_to_p1 permit 10 + set ip next-hop 11.88.11.12 + ! +---- +<1> Disable the IP on the main port it is important +<2> Enable VLAN1 +<3> PBR configuration +<4> Enable VLAN2 +<5> PBR configuration +<6> TRex MAC-address destination port +<7> PBR configuration rules + +=== Static source MAC-address setting + +With this feature, TRex replaces the source MAC address with the client IP address. +Note: This feature was requested by the Cisco ISG group. + + +*YAML:*:: +[source,python] +---- + mac_override_by_ip : true +---- + +.Example +[source,python] +---- +- duration : 0.1 + .. + mac_override_by_ip : true <1> +---- +<1> In this case, the client side MAC address will be look like this: +SRC_MAC = IPV4(IP) + 00:00 + +=== IPv6 support ( `--ipv6`); + +Support for IPv6 includes: + +1. Support for pcap files containing IPv6 packets +2. Ability to generate IPv6 traffic from pcap files containing IPv4 packets +The following switch enables this feature: `--ipv6` +Two new keywords (src_ipv6, dst_ipv6) have been added to the YAML +file to specify the most significant 96-bits of the IPv6 address - for example: + +[source,python] +---- + src_ipv6 : [0xFE80,0x0232,0x1002,0x0051,0x0000,0x0000] + dst_ipv6 : [0x2001,0x0DB8,0x0003,0x0004,0x0000,0x0000] +---- + +The IPv6 address is formed by placing what would typically be the IPv4 +address into the least significant 32-bits and copying the value provided +in the src_ipv6/dst_ipv6 keywords into the most signficant 96-bits. +If src_ipv6 and dst_ipv6 are not specified in the YAML file, the default +is to form IPv4-compatible addresses (where the most signifcant 96-bits +are zero). 
+ +There is a support for all plugins (control flows that needed to be change). + +*An example:*:: +[source,bash] +---- +$sudo ./t-rex-64 -f cap2l/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 --ipv6 +---- + +*Limitations:*:: + +* TRex cannot generate both IPv4 and IPv6 traffic. The --ipv6 switch must be specified even when using a pcap file containing only IPv6 packets + + +*Router configuration:*:: + +[source,python] +---- +interface TenGigabitEthernet1/0/0 + mac-address 0000.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 + ip policy route-map p1_to_p2 + load-interval 30 + ipv6 enable ==> IPv6 + ipv6 address 2001:DB8:1111:2222::1/64 <1> + ipv6 policy route-map ipv6_p1_to_p2 <2> +! + + +ipv6 unicast-routing <3> + +ipv6 neighbor 3001::2 TenGigabitEthernet0/1/0 0000.0002.0002 <4> +ipv6 neighbor 2001::2 TenGigabitEthernet0/0/0 0000.0003.0002 + +route-map ipv6_p1_to_p2 permit 10 <5> + set ipv6 next-hop 2001::2 +! +route-map ipv6_p2_to_p1 permit 10 + set ipv6 next-hop 3001::2 +! + + +asr1k(config)#ipv6 route 4000::/64 2001::2 +asr1k(config)#ipv6 route 5000::/64 3001::2 +---- +<1> enable ipv6 +<2> add pbr +<3> enable ipv6 routing +<4> mac-addr setting should be like T-Rex +<5> PBR configuraion + + +=== Source MAC-address mapping using a file + +Extending the source MAC-address replacment capability. +It is possible to have a mapping betwean IPv4->MAC using the new `--mac` CLI switch +file format is YAML. + +*An example:*:: +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --mac cap2/test_example.yaml +---- + +*MAC file structure:*:: + +[source,python] +---- +- items : + - ip : "16.0.0.1" + mac : [0x16,0x1,0x4,0x5,0x6,0x7] + - ip : "16.0.0.2" + mac : [0x16,0x2,0x0,0x1,0x0,0x0] +---- + +*Limitations:*:: + +. It is assumed that most of the clients has MAC-addrees. at least 90% of the IP should have a MAC-addrees mapping. 
+ +=== Destination mac address spreadings anchor:mac_spread[] + +Using this option, one can send traffic to a few destination devices. In normal mode all the packets are sent to the port destination mac-address. +to enable this option add this CLI `--mac-spread` to the command line + +example: + +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -m 1000 -c 4 -l 100 --mac-spread 2 +---- +in this case TRex will send to port destination mac and port destination mac +1 +using a switch you could connect TRex to a few DUT. +All the DUTs should return the traffic only to right port source address + +[source,bash] +---- + switch A switch A + | | + | D0+0 -> DUT0 <- D1+0 | +TRex(0) -| |-TRex(1) + | | + | D0+1 -> DUT1 <- D1+1 | + | + +---- + + +=== NAT support + +TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn` to the command line. +//TBD: maybe... add the '--learn' option on the command line. +This is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. + +*Example:*:: + +*simple HTTP traffic* + +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/http_simple.yaml -c 4 -l 1000 -d 100000 -m 30 --learn +---- + +*SFR traffic without bundeling/ALG support* + +[source,bash] +---- +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 100000 -m 10 --learn +---- + +*New terminal counters:*:: + +[source,python] +---- +-Global stats enabled + Cpu Utilization : 0.6 % 33.4 Gb/core + Platform_factor : 1.0 + Total-Tx : 773.76 Mbps Nat_time_out : 0 <1> + Total-Rx : 770.47 Mbps Nat_no_fid : 0 <2> + Total-PPS : 106.73 Kpps Total_nat_active: 9 <3> + Total-CPS : 2.78 Kcps Total_nat_open : 232129 <4> +---- +<1> The number of translations with timeout should be zero. Usually this occurs when the router drops the flow due to NAT. +<2> Translation not found. This can occur when there is large latency in the router input/output queue. 
+<3> Active number of TRex traslation flows, should be low in the case of low RTT. +<4> A total of TRex translation. May be different from the total number of flows in case template is uni-directional (no need a translation). + + +*Configuration for Cisco ASR1000 Series:*:: + +The feature was tested with the following configuration and sfr_delay_10_1g_no_bundeling. yaml traffic profile. +Clients address range is 16.0.0.1-16.0.0.255 + +[source,python] +---- +interface TenGigabitEthernet1/0/0 <1> + mac-address 0000.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 + ip policy route-map p1_to_p2 + ip nat inside <2> + load-interval 30 +! + +interface TenGigabitEthernet1/1/0 + mac-address 0000.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 + ip policy route-map p1_to_p2 + ip nat outside <3> + load-interval 30 + +ip nat pool my 200.0.0.0 200.0.0.255 netmask 255.255.255.0 <4> + +ip nat inside source list 7 pool my overload +access-list 7 permit 16.0.0.0 0.0.0.255 <5> + +ip nat inside source list 8 pool my overload <6> +access-list 8 permit 17.0.0.0 0.0.0.255 +---- +<1> Should be connected to TRex Client port (router inside port) +<2> NAT inside +<3> NAT outside +<4> Pool of outside address with overload +<5> Should match TRex YAML client range +<6> In case of dual port TRex. + + +*Limitations:*:: + +. The IPv6-IPv6 NAT feature does not exist on routers, so this feature can work on IPv4 only. +. Does not support NAT64. +. Bundeling/plugin support is not fully supported. This means that sfr_delay_10.yaml can't work.Use sfr_delay_10_no_bundeling.yaml instead. + +[NOTE] +===================================================================== +* `--learn-verify` is a debug TRex mechanism for testing the TRex learn mechanism. +* If the router is configured without NAT, it will verify that the inside_ip==outside_ip and inside_port==outside_port. 
+===================================================================== + +=== Flow order/latency verification ( `--rx-check` ) + +In normal mode (without this feature enabled), received traffic is not checked by software. It is only counted by hardware (Intel NIC) for dropped-packet verification at the end of the test. The only exception is the Latency/Jitter packets. +This is one of the reasons that with TRex, you *cannot* check features that terminate traffic (for example, TCP Proxy). +To enable this feature you should add the option `--rx-check [sample]` to the command line, where sample is the sample rate. +1/sample flows will be loaded to the software for verification. For 40Gb/Sec traffic you can use a sample of 1/128. Watch for Rx CPU% utilization. + +INFO : This feature changes the TTL of the sample flows to 255 and expects 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop can occur when there are a number of routers between the TRex client side and the TRex server side. + +With this feature enabled: + +* You can verify that packets get out of the DUT in order (from each flow perspective) +* There are no dropped packets. There is no need to wait for the end of the test. Without this feature enabled you must wait for the end of the test to be aware of dropped packets, because there is always a difference between Tx and Rx due to RTT; to be sure, you need to stop the traffic and wait for the Rx traffic, and this happens only at the end of the test.
+ + +.Full example +[source,bash] +---- +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 --rx-check 128 +---- + +[source,python] +---- +Cpu Utilization : 0.1 % <1> + if| tx_ok , rx_ok , rx ,error, average , max , Jitter<1> , max window + | , , check, , latency(usec),latency (usec) ,(usec) , + -------------------------------------------------------------------------------- + 0 | 1002, 1002, 2501, 0, 61 , 70, 3 | 60 + 1 | 1002, 1002, 2012, 0, 56 , 63, 2 | 50 + 2 | 1002, 1002, 2322, 0, 66 , 74, 5 | 68 + 3 | 1002, 1002, 1727, 0, 58 , 68, 2 | 52 + + Rx Check stats enabled <2> + ------------------------------------------------------------------------------------------- + rx check: avg/max/jitter latency, 94 , 744, 49<1> | 252 287 309 <3> + + active flows: 10, fif: 308, drop: 0, errors: 0 <4> + ------------------------------------------------------------------------------------------- +---- +<1> CPU% of the Rx thread. If it is too high *increase* the sample rate. +<2> Rx Check section. For more detailed info, press 'r' during the test or at the end of the test. +<3> Average latency, max latency, jitter on the template flows in microseconds. This is usually *higher* than the latency check packet because the feature works more on this packet. +<4> Drop counters and errors counter should be zero. If not, press 'r' to see the full report or view the report at the end of the test. 
+ +.Full report by pressing 'r' +[source,python] +---- + m_total_rx : 2 + m_lookup : 2 + m_found : 1 + m_fif : 1 + m_add : 1 + m_remove : 1 + m_active : 0 + <1> + 0 0 0 0 1041 0 0 0 0 0 0 0 0 min_delta : 10 usec + cnt : 2 + high_cnt : 2 + max_d_time : 1041 usec + sliding_average : 1 usec + precent : 100.0 % + histogram + ----------- + h[1000] : 2 + tempate_id_ 0 , errors: 0, jitter: 61 <2> + tempate_id_ 1 , errors: 0, jitter: 0 + tempate_id_ 2 , errors: 0, jitter: 0 + tempate_id_ 3 , errors: 0, jitter: 0 + tempate_id_ 4 , errors: 0, jitter: 0 + tempate_id_ 5 , errors: 0, jitter: 0 + tempate_id_ 6 , errors: 0, jitter: 0 + tempate_id_ 7 , errors: 0, jitter: 0 + tempate_id_ 8 , errors: 0, jitter: 0 + tempate_id_ 9 , errors: 0, jitter: 0 + tempate_id_10 , errors: 0, jitter: 0 + tempate_id_11 , errors: 0, jitter: 0 + tempate_id_12 , errors: 0, jitter: 0 + tempate_id_13 , errors: 0, jitter: 0 + tempate_id_14 , errors: 0, jitter: 0 + tempate_id_15 , errors: 0, jitter: 0 + ager : + m_st_alloc : 1 + m_st_free : 0 + m_st_start : 2 + m_st_stop : 1 + m_st_handle : 0 +---- +<1> Any errors shown here +<2> Error per template info + +*Limitation:*:: + +** This feature must be enabled with a latency check (-l). +** To receive the packets TRex does the following: +*** Changes the TTL to 0xff and expects 0xFF (loopback) or oxFE (route). 
( use --hop to tune this number) +*** Adds 24 bytes of metadata as ipv4/ipv6 option header + +== Reference + +=== Traffic YAML + +==== Global Traffic YAML section + +[source,python] +---- +- duration : 10.0 <1> + generator : <2> + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] <3> + vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } <7> + mac_override_by_ip : true <8> + cap_ipg : true <4> + cap_ipg_min : 30 <5> + cap_override_ipg : 200 <6> +---- +<1> Duration of the test (seconds). Can override using the `-d` option. +<2> See the generator section. +<3> Default source/destination MAC address. The configuration file can override the defaults. +<4> TRUE indicates that the IPG is taken from pcap file. +<5> The following two options can set the min ipg in microseconds: ( if (pkt_ipg Value to override (microseconds). +<7> Enable valn feature. See xref:trex_valn[here] for info. +<8> Enable MAC address replacement by Client IP. + +==== Per template section + +[source,python] +---- + - name: cap2/dns.pcap <1> + cps : 10.0 <2> + ipg : 10000 <3> + rtt : 10000 <4> + w : 1 <5> + server_addr : "48.0.0.7" <6> + one_app_server : true <7> + +---- +<1> The name of the template pcap file. It can be relative to the t-rex-64 image or absolute path. The pcap file can include one flow. (Exception: in case of plug-ins). +<2> Connection per second for m==1 +<3> If the global section of the YAML file does not include `cap_ipg : true`, this line sets the inter-packet gap in microseconds. +<4> Should be set to the same value as ipg (microseconds). +<5> Default value: w=1. This indicates to the IP generator how to generate the flows. If w=2, two flows from the same template will be generated in a burst (more for HTTP that has burst of flows). 
+<6> If `one_app_server` is set to true, then all templates will use the same server. +<7> If the same server address is required, set this value to true. + + + +=== Configuration YAML anchor:trex_config[] + +The configuration file, in YAML format, configures TRex behavior, including: + + +- MAC address for each port (source and destination) +- Masking interfaces (usually for 1Gb/Sec TRex) to ensure that TRex does not take the management ports as traffic ports. +- Changing the zmq/telnet TCP port. + +==== Basic Configuration + +Copy/install the configuration file to `/etc/trex_cfg.yaml`. +TRex loads it automatically at startup. You still can override options with the command line option switch `--cfg [file]` in the CLI +Configuration file examples can be found in the `$ROOT/cfg` folder + + +[source,python] +---- + - port_limit : 2 <1> + version : 2 <2> + interfaces : ["03:00.0","03:00.1"] <3> + enable_zmq_pub : true <4> + zmq_pub_port : 4500 <5> + prefix : setup1 <6> + limit_memory : 1024 <7> + c : 4 <8> + port_bandwidth_gb : 10 <9> + port_info : # set eh mac addr + - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 <10> + src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] + - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1 + src_mac : [0x4,0x0,0x0,0x4,0x0,0x00] + - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2 + src_mac : [0x6,0x0,0x0,0x6,0x0,0x00] + - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3 + src_mac : [0x0,0x0,0x0,0x8,0x0,0x02] + - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4 +---- +<1> The number of ports, should be equal to the number of interfaces in 3) +<2> Must be set to 2 +<3> Interface that should be used. used `sudo ./dpdk_setup_ports.py --show` +<4> Enable the ZMQ publisher for stats data. +<5> ZMQ port number. + +<6> The name of the setup should be distinct ( DPDK --file-prefix ) +<7> DPDK -m limit the packet memory +<8> Number of threads per dual interface ( like -c CLI option ) +<9> The bandwidth of each interface in Gb/sec. 
In this example we have 10Gb/sec interfaces. for VM put 1. it used to tune the amount of memory allocated by TRex. +<10> MAC address per port - source and destination. + + +To find out what the interfaces ids, perform the following: + +[source,bash] +---- + $>sudo ./dpdk_setup_ports.py --show + + Network devices using DPDK-compatible driver + ============================================ + + Network devices using kernel driver + =================================== + 0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active* + 0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<1> + 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<2> + 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<3> + 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<4> + + Other network devices + ===================== + +---- +<1> TRex interface #1 before unbinding +<2> TRex interface #2 before unbinding +<3> TRex interface #3 before unbinding +<4> TRex interface #4 before unbinding + + +minimum configuration file is: + +[source,bash] +---- + +- port_limit : 4 + version : 2 #<1> + interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] #<2> +---- +<1> must add version 2 to the configuration file +<2> The list of interfaces from `sudo ./dpdk_setup_ports.py --show` + + +==== Memory section configuration + +The memory section is optional. 
It is used when there is a need to tune the amount of memory used by packet manager + +[source,python] +---- + - port_limit : 2 + version : 2 + interfaces : ["03:00.0","03:00.1"] + memory : <1> + mbuf_64 : 16380 <2> + mbuf_128 : 8190 + mbuf_256 : 8190 + mbuf_512 : 8190 + mbuf_1024 : 8190 + mbuf_2048 : 4096 + traffic_mbuf_64 : 16380 <3> + traffic_mbuf_128 : 8190 + traffic_mbuf_256 : 8190 + traffic_mbuf_512 : 8190 + traffic_mbuf_1024 : 8190 + traffic_mbuf_2048 : 4096 + dp_flows : 1048576 <4> + global_flows : 10240 <5> +---- +<1> Memory section +<2> Per dual interfaces number of buffers - buffer for real time traffic generation +<3> Traffic buffer - when you have many template only this section should be enlarge +<4> number of TRex flows needed +<5> reserved + + +==== Platform section configuration + +The platform section is optional. It is used to tune the performance and allocate the cores to the right NUMA +a configuration file now has the folowing struct to support multi instance + +[source,python] +---- +- version : 2 + interfaces : ["03:00.0","03:00.1"] + port_limit : 2 + enable_zmq_pub : true # enable publisher for stats data + zmq_pub_port : 4507 + prefix : setup1 <1> + limit_memory : 1024 <2> + c : 4 <3> + port_bandwidth_gb : 10 <4> + platform : <5> + master_thread_id : 0 <6> + latency_thread_id : 5 <7> + dual_if : + - socket : 0 <8> + threads : [1,2,3,4] <9> +---- +<1> The name of the setup should be distinct ( DPDK --file-prefix ) +<2> DPDK -m +<3> Number of threads per dual interface ( like -c CLI option ) +<4> The bandwidth of each interface in Gb/sec. In this example we have 10Gb/sec interfaces. for VM put 1. it used to tune the amount of memory allocated by TRex. 
+<5> the platform section +<6> The thread_id for control +<7> The thread_id for latency if used +<8> Socket of the dual interfaces, in this example of 03:00.0 and 03:00.1, memory should be local to the interface +<9> Thread to be used, should be local to the NIC + + + +=== Command line options anchor:cml-line[] + +*-f=TRAFIC_YAML_FILE*:: + Traffic YAML configuration file. + +*-c=CORES*:: + Number of cores. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. + +*-l=HZ*:: + Run the latency daemon in this Hz rate. Example: -l 1000 runs 1000 pkt/sec from each interface. A value of zero (0) disables the latency check. + +*-d=DURATION*:: + Duration of the test (sec), Default: 0 + +*-m=MUL*:: + Factor for bandwidth (multiply the CPS of each template by this value). + +*--ipv6*:: + Convert template to IPv6 mode. + +*--learn*:: + Learn the dynamic NAT translation and ALG. + +*--learn-verify*:: + Learn the translation. This feature is intended for verification of the mechanism in cases where there is no NAT. + +*-p*:: + Flow-flip. Sends all flow packets from the same interface. This can solve the flow order. Does not work with any router configuration. + +*-e*:: + same as `-p` but comply to the direction rules and replace source/destination IPs. it might not be good for NBAR as it is expected clients ip to be sent from same direction. + +//TBD: The last 2 sentences (flow order, router configuration) are unclear. + + +*--lm=MASK*:: + Latency mask. Use this to verify port connectivity. Possible values: 0x1 (only port 0 will send traffic), 0x2 (only port 1 will send traffic). + +*--lo*:: + Latency test. + +*--limit-ports=PORTS*:: + Limit number of ports. Configure this in the --cfg file. Possible values (number of ports): 2, 4, 6, 8. (Default: 4) + +*--nc*:: + If set, will terminate exacly at the end of the duration. This provides a faster, more accurate TRex termination. In default it wait for all the flow to terminate gracefully. 
In case of a very long flow the termination might be prolonged. + +*-pm=MULTIPLIER*:: + Platform factor. If the setup includes a splitter, you can multiply the total results by this factor. Example: --pm 2.0 will multiply all bps results by this factor. + +*-pubd*:: + Disable ZMQ monitor's publishers. + +*-1g*:: + Deprecated. Configure TRex to 1G. Configure this in the --cfg file. + +*-k=KSEC*:: + Run a latency test before starting the test. TRex will wait for x sec before and after sending latency packets at startup. + +*--cfg=platform_yaml*:: + Load and configure platform using this file. See example file: cfg/cfg_examplexx.yaml + This file is used to configure/mask interfaces, cores, affinity, and MAC addresses. + You can use the example file by copying it to: /etc/trex_cfg.yaml + + +*-v=VERBOSE*:: + Verbose mode (works only on the debug image! ) + 1 Show only stats. + 2 Run preview. Does not write to file. + 3 Run preview and write to stats file. + Note: When using verbose mode, it is not necessary to add an output file. + Caution: Operating in verbose mode can generate very large files (terabytes). Use with caution, only on a local drive. + + +*--rx-check=SAMPLE_RATE*:: + Enable Rx check module. Using this each thread samples flows (1/sample) and checks order, latency, and additional statistics. + Note: This feature operates as an additional thread. + +*--hops=HOPS*:: + Number of hops in the setup (default is one hop). Relevant only if the Rx check is enabled. + +*--iom=MODE*:: + I/O mode for interactive mode. Possible values: 0 (silent), 1 (normal), 2 (short) + +*--no-flow-control*:: + Prevents TRex from changing flow control. In default TRex operation, flow control is disabled at startup. + +*--mac-spread*:: + Spread the destination MAC by this factor. e.g. 2 will generate the traffic to 2 devices DEST-MAC ,DEST-MAC+1. The maximum is up to 128 devices.
+ + +ifndef::backend-docbook[] + + +endif::backend-docbook[] + +== Appendix + +=== Simulator + +The TRex simulator is a linux application that can process on any Linux CEL (it can run on TRex itself). +you can create create output pcap file from input of traffic YAML. + +==== Simulator + + +[source,bash] +---- + +$./bp-sim-64-debug -f avl/sfr_delay_10_1g.yaml -v 1 + + -- loading cap file avl/delay_10_http_get_0.pcap + -- loading cap file avl/delay_10_http_post_0.pcap + -- loading cap file avl/delay_10_https_0.pcap + -- loading cap file avl/delay_10_http_browsing_0.pcap + -- loading cap file avl/delay_10_exchange_0.pcap + -- loading cap file avl/delay_10_mail_pop_0.pcap + -- loading cap file avl/delay_10_mail_pop_1.pcap + -- loading cap file avl/delay_10_mail_pop_2.pcap + -- loading cap file avl/delay_10_oracle_0.pcap + -- loading cap file avl/delay_10_rtp_160k_full.pcap + -- loading cap file avl/delay_10_rtp_250k_full.pcap + -- loading cap file avl/delay_10_smtp_0.pcap + -- loading cap file avl/delay_10_smtp_1.pcap + -- loading cap file avl/delay_10_smtp_2.pcap + -- loading cap file avl/delay_10_video_call_0.pcap + -- loading cap file avl/delay_10_sip_video_call_full.pcap + -- loading cap file avl/delay_10_citrix_0.pcap + -- loading cap file avl/delay_10_dns_0.pcap + id,name , tps, cps,f-pkts,f-bytes, duration, Mb/sec, MB/sec, c-flows, PPS,total-Mbytes-duration,errors,flows #<2> + 00, avl/delay_10_http_get_0.pcap ,404.52,404.52, 44 , 37830 , 0.17 , 122.42 , 15.30 , 67 , 17799 , 2 , 0 , 1 + 01, avl/delay_10_http_post_0.pcap ,404.52,404.52, 54 , 48468 , 0.21 , 156.85 , 19.61 , 85 , 21844 , 2 , 0 , 1 + 02, avl/delay_10_https_0.pcap ,130.87,130.87, 96 , 91619 , 0.22 , 95.92 , 11.99 , 29 , 12564 , 1 , 0 , 1 + 03, avl/delay_10_http_browsing_0.pcap ,709.89,709.89, 37 , 34425 , 0.13 , 195.50 , 24.44 , 94 , 26266 , 2 , 0 , 1 + 04, avl/delay_10_exchange_0.pcap ,253.81,253.81, 43 , 9848 , 1.57 , 20.00 , 2.50 , 400 , 10914 , 0 , 0 , 1 + 05, avl/delay_10_mail_pop_0.pcap 
,4.76,4.76, 20 , 5603 , 0.17 , 0.21 , 0.03 , 1 , 95 , 0 , 0 , 1 + 06, avl/delay_10_mail_pop_1.pcap ,4.76,4.76, 114 , 101517 , 0.25 , 3.86 , 0.48 , 1 , 543 , 0 , 0 , 1 + 07, avl/delay_10_mail_pop_2.pcap ,4.76,4.76, 30 , 15630 , 0.19 , 0.60 , 0.07 , 1 , 143 , 0 , 0 , 1 + 08, avl/delay_10_oracle_0.pcap ,79.32,79.32, 302 , 56131 , 6.86 , 35.62 , 4.45 , 544 , 23954 , 0 , 0 , 1 + 09, avl/delay_10_rtp_160k_full.pcap ,2.78,8.33, 1354 , 1232757 , 61.24 , 27.38 , 3.42 , 170 , 3759 , 0 , 0 , 3 + 10, avl/delay_10_rtp_250k_full.pcap ,1.98,5.95, 2069 , 1922000 , 61.38 , 30.48 , 3.81 , 122 , 4101 , 0 , 0 , 3 + 11, avl/delay_10_smtp_0.pcap ,7.34,7.34, 22 , 5618 , 0.19 , 0.33 , 0.04 , 1 , 161 , 0 , 0 , 1 + 12, avl/delay_10_smtp_1.pcap ,7.34,7.34, 35 , 18344 , 0.21 , 1.08 , 0.13 , 2 , 257 , 0 , 0 , 1 + 13, avl/delay_10_smtp_2.pcap ,7.34,7.34, 110 , 96544 , 0.27 , 5.67 , 0.71 , 2 , 807 , 0 , 0 , 1 + 14, avl/delay_10_video_call_0.pcap ,11.90,11.90, 2325 , 2532577 , 36.56 , 241.05 , 30.13 , 435 , 27662 , 3 , 0 , 1 + 15, avl/delay_10_sip_video_call_full.pcap ,29.35,58.69, 1651 , 120315 , 24.56 , 28.25 , 3.53 , 721 , 48452 , 0 , 0 , 2 + 16, avl/delay_10_citrix_0.pcap ,43.62,43.62, 272 , 84553 , 6.23 , 29.51 , 3.69 , 272 , 11866 , 0 , 0 , 1 + 17, avl/delay_10_dns_0.pcap ,1975.02,1975.02, 2 , 162 , 0.01 , 2.56 , 0.32 , 22 , 3950 , 0 , 0 , 1 + + 00, sum ,4083.86,93928.84, 8580 , 6413941 , 0.00 , 997.28 , 124.66 , 2966 , 215136 , 12 , 0 , 23 + Memory usage + size_64 : 1687 + size_128 : 222 + size_256 : 798 + size_512 : 1028 + size_1024 : 86 + size_2048 : 4086 + Total : 8.89 Mbytes 159% util #<1> + +---- +<1> the memory usage of the templates +<2> CSV for all the templates + diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc new file mode 100755 index 00000000..d64cea43 --- /dev/null +++ b/trex_book_basic.asciidoc @@ -0,0 +1,3308 @@ + +== Basic usage + +=== DNS basic example + +The following is a simple example helpful for understanding how TRex works. 
The example uses the TRex simulator. +This simulator can be run on any Cisco Linux including on the TRex itself. +TRex simulates clients and servers and generates traffic based on the pcap files provided. + +.Clients/Servers +image:images/trex_model.png[title="generator"] + +The following is an example YAML-format traffic configuration file (cap2/dns_test.yaml), with explanatory notes. + +[source,python] +---- +$more cap2/dns_test.yaml +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" <1> + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" <2> + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_info : + - name: cap2/dns.pcap <3> + cps : 1.0 <4> + ipg : 10000 <5> + rtt : 10000 <6> + w : 1 +---- +<1> Range of clients (IPv4 format). +<2> Range of servers (IPv4 format). +<3> pcap file, which includes the DNS cap file that will be used as a template. +<4> Number of connections per second to generate. In the example, 1.0 means 1 connection per secod. +<5> Inter-packet gap (microseconds). 10,000 = 10 msec. +<6> Should be the same as ipg. + +.DNS template file +image:images/dns_wireshark.png[title="generator"] + + +The DNS template file includes: + +1. *One* flow +2. Two packets +3. First packet: from the initiator (client -> server) +4. Second packet: response (server -> client) + +TRex replaces the client_ip, client_port, and server_ip. The server_port will be remain the same. + + +[source,bash] +---- +$./bp-sim-32-debug -f cap2/dns.yaml -o my.erf -v 3 + -- loading cap file cap2/dns.pcap + id,name , tps, cps,f-pkts,f-bytes, duration, Mb/sec, MB/sec, #<1> + 00, cap2/dns.pcap ,1.00,1.00, 2 , 170 , 0.02 , 0.00 , 0.00 , + 00, sum ,1.00,1.00, 2 , 170 , 0.00 , 0.00 , 0.00 , + + Generating erf file ... 
+pkt_id,time,fid,pkt_info,pkt,len,type,is_init,is_last,type,thread_id,src_ip,dest_ip,src_port #<2> + 1 ,0.010000,1,0x9055598,1,77,0,1,0,0,0,10000001,30000001,1024 + 2 ,0.020000,1,0x9054760,2,93,0,0,1,0,0,10000001,30000001,1024 + 3 ,2.010000,2,0x9055598,1,77,0,1,0,0,0,10000002,30000002,1024 + 4 ,2.020000,2,0x9054760,2,93,0,0,1,0,0,10000002,30000002,1024 + 5 ,3.010000,3,0x9055598,1,77,0,1,0,0,0,10000003,30000003,1024 + 6 ,3.020000,3,0x9054760,2,93,0,0,1,0,0,10000003,30000003,1024 + 7 ,4.010000,4,0x9055598,1,77,0,1,0,0,0,10000004,30000004,1024 + 8 ,4.020000,4,0x9054760,2,93,0,0,1,0,0,10000004,30000004,1024 + 9 ,5.010000,5,0x9055598,1,77,0,1,0,0,0,10000005,30000005,1024 + 10 ,5.020000,5,0x9054760,2,93,0,0,1,0,0,10000005,30000005,1024 + 11 ,6.010000,6,0x9055598,1,77,0,1,0,0,0,10000006,30000006,1024 + 12 ,6.020000,6,0x9054760,2,93,0,0,1,0,0,10000006,30000006,1024 + 13 ,7.010000,7,0x9055598,1,77,0,1,0,0,0,10000007,30000007,1024 + 14 ,7.020000,7,0x9054760,2,93,0,0,1,0,0,10000007,30000007,1024 + 15 ,8.010000,8,0x9055598,1,77,0,1,0,0,0,10000008,30000008,1024 + 16 ,8.020000,8,0x9054760,2,93,0,0,1,0,0,10000008,30000008,1024 + 17 ,9.010000,9,0x9055598,1,77,0,1,0,0,0,10000009,30000009,1024 + 18 ,9.020000,9,0x9054760,2,93,0,0,1,0,0,10000009,30000009,1024 + 19 ,10.010000,a,0x9055598,1,77,0,1,0,0,0,1000000a,3000000a,1024 + 20 ,10.020000,a,0x9054760,2,93,0,0,1,0,0,1000000a,3000000a,1024 + +file stats +================= + m_total_bytes : 1.66 Kbytes + m_total_pkt : 20.00 pkt + m_total_open_flows : 10.00 flows + m_total_pkt : 20 + m_total_open_flows : 10 + m_total_close_flows : 10 + m_total_bytes : 1700 +---- +<1> Global statistics on the templates given. cps=connection per second. tps is template per second. they might be different in case of plugins where one template includes more than one flow. For example RTP flow in SFR profile (avl/delay_10_rtp_160k_full.pcap) +<2> Generator output. 
+ + +[source,bash] +---- +$wireshark my.erf +---- +gives +//TBD: Not sure what the output looks like here, with this line showing only "gives" + +.TRex generated output file +image:images/dns_trex_run.png[title="generator"] + +As the output file shows... + +- TRex generates a new flow every 1 sec. +- Client IP values are taken from client IP pool . +- Servers IP values are taken from server IP pool . +- IPG (iter packet gap) values are taken from the configuration file (10 msec). + + +[NOTE] +===================================================================== +In basic usage, TRex does not wait for an initiator packet to be received. The response packet will be triggered based only on timeout (IPG in this example). +In advanced scenarios (for example, NAT), The first packet of the flow can process by TRex software and initiate the response packet only when a packet is received. +Consequently, it is necessary to *process* the template pcap file offline and ensure that there is enough round-trip delay (RTT) between client and server packets. +One approach is to record the flow with a Pagent that creats RTT (10 msec RTT in the example), recording the traffic at some distance from both the client and server (not close to either side). +This ensures sufficient delay that packets from each side will arrive without delay in the DUT. TRex-dev will work on an offline tool that will make it even simpler. +Another approach is to change the `yaml` `ipg` field to a high enough value (bigger than 10msec ). 
+===================================================================== + +Converting the simulator text results in a table similar to the following: + +.DNS example formatted results +[format="csv",cols="1^,2^,1^,1^,2^,1^,2^,1^", options="header"] +|================= + pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,direction + 1 , 0.010000 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.020000 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 2.010000 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 4 , 2.020000 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 5 , 3.010000 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 6 , 3.020000 , 3 , 2 , 16.0.0.3 , 1024 , 48.0.0.3 , <- + 7 , 4.010000 , 4 , 1 , 16.0.0.4 , 1024 , 48.0.0.4 , -> + 8 , 4.020000 , 4 , 2 , 16.0.0.4 , 1024 , 48.0.0.4 , <- + 9 , 5.010000 , 5 , 1 , 16.0.0.5 , 1024 , 48.0.0.5 , -> + 10 , 5.020000 , 5 , 2 , 16.0.0.5 , 1024 , 48.0.0.5 , <- + 11 , 6.010000 , 6 , 1 , 16.0.0.6 , 1024 , 48.0.0.6 , -> + 12 , 6.020000 , 6 , 2 , 16.0.0.6 , 1024 , 48.0.0.6 , <- + 13 , 7.010000 , 7 , 1 , 16.0.0.7 , 1024 , 48.0.0.7 , -> + 14 , 7.020000 , 7 , 2 , 16.0.0.7 , 1024 , 48.0.0.7 , <- + 15 , 8.010000 , 8 , 1 , 16.0.0.8 , 1024 , 48.0.0.8 , -> + 16 , 8.020000 , 8 , 2 , 16.0.0.8 , 1024 , 48.0.0.8 , <- + 17 , 9.010000 , 9 , 1 , 16.0.0.9 , 1024 , 48.0.0.9 , -> + 18 , 9.020000 , 9 , 2 , 16.0.0.9 , 1024 , 48.0.0.9 , <- + 19 , 10.010000 , a , 1 , 16.0.0.10 , 1024 , 48.0.0.10 , -> + 20 , 10.020000 , a , 2 , 16.0.0.10 , 1024 , 48.0.0.10 , <- +|================= + +where: +fid:: + Flow ID - different IDs for each flow. + +low-pkt-id:: + Packet ID within the flow. Numbering begins with 1. + +client_ip:: + Client IP address. + +client_port:: + Client IP port. + +server_ip:: + Server IP address. + +direction:: + Direction. "->" is client-to-server; "<-" is server-to-client. + + + +The following enlarges the CPS and reduces the duration. 
+ +[source,python] +---- +$more cap2/dns_test.yaml +- duration : 1.0 <1> + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_info : + - name: cap2/dns.pcap + cps : 10.0 <2> + ipg : 50000 <3> + rtt : 50000 + w : 1 +---- +<1> Duration is 1 second. +<2> CPS is 10.0. +<3> IPG is 50 msec. + + +Running this produces the following output: + +[source,bash] +---- +$./bp-sim-32-debug -f cap2/dns_test.yaml -o my.erf -v 3 +---- + +.Formated results +[format="csv",cols="1^,2^,1^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 0 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.060000 , 0 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 0.210000 , 0 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 4 , 0.260000 , 0 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 5 , 0.310000 , 0 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 6 , 0.360000 , 0 , 3 , 2 , 16.0.0.3 , 1024 , 48.0.0.3 , <- + 7 , 0.410000 , 0 , 4 , 1 , 16.0.0.4 , 1024 , 48.0.0.4 , -> + 8 , 0.460000 , 0 , 4 , 2 , 16.0.0.4 , 1024 , 48.0.0.4 , <- + 9 , 0.510000 , 0 , 5 , 1 , 16.0.0.5 , 1024 , 48.0.0.5 , -> + 10 , 0.560000 , 0 , 5 , 2 , 16.0.0.5 , 1024 , 48.0.0.5 , <- + 11 , 0.610000 , 0 , 6 , 1 , 16.0.0.6 , 1024 , 48.0.0.6 , -> + 12 , 0.660000 , 0 , 6 , 2 , 16.0.0.6 , 1024 , 48.0.0.6 , <- + 13 , 0.710000 , 0 , 7 , 1 , 16.0.0.7 , 1024 , 48.0.0.7 , -> + 14 , 0.760000 , 0 , 7 , 2 , 16.0.0.7 , 1024 , 48.0.0.7 , <- + 15 , 0.810000 , 0 , 8 , 1 , 16.0.0.8 , 1024 , 48.0.0.8 , -> + 16 , 0.860000 , 0 , 8 , 2 , 16.0.0.8 , 1024 , 48.0.0.8 , <- + 17 , 0.910000 , 0 , 9 , 1 , 16.0.0.9 , 1024 , 48.0.0.9 , -> + 18 , 0.960000 , 0 , 9 , 2 , 16.0.0.9 , 1024 , 48.0.0.9 , <- + 19 , 1.010000 , 0 , a , 1 , 
16.0.0.10 , 1024 , 48.0.0.10 , -> + 20 , 1.060000 , 0 , a , 2 , 16.0.0.10 , 1024 , 48.0.0.10 , <- +|================= + +Use the following to display the output as a chart, with: +x axis: time (seconds) +y axis: flow ID +The output indicates that there are 10 flows in 1 second, as expected, and the IPG is 50 msec + +//TBD: not sure what the "+ +" means ==> [hh] Ascii Doc break page + +ifndef::backend-docbook[] ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ +endif::backend-docbook[] + +[NOTE] +===================================================================== +Note the gap in the second flow generation. This is an expected schedular artifact and does not have an effect. +===================================================================== + +=== DNS, take flow IPG from pcap file + +In the following example the IPG is taken from the IPG itself. + +[source,python] +---- +- duration : 1.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_ipg : true <1> + #cap_ipg_min : 30 + #cap_override_ipg : 200 + cap_info : + - name: cap2/dns.pcap + cps : 10.0 + ipg : 10000 + rtt : 10000 + w : 1 +---- +<1> IPG is taken from pcap. + + +.dns ipg from pcap file +[format="csv",cols="1^,2^,1^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 0 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.030944 , 0 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 0.210000 , 0 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 4 , 0.230944 , 0 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 5 , 0.310000 , 0 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 6 , 0.330944 , 0 , 3 , 2 , 16.0.0.3 , 1024 , 48.0.0.3 , <- + 7 , 0.410000 , 0 , 4 , 1 , 16.0.0.4 , 1024 , 48.0.0.4 , -> + 8 , 0.430944 , 0 , 4 , 2 , 16.0.0.4 , 1024 , 48.0.0.4 , <- + 9 , 0.510000 , 0 , 5 , 1 , 16.0.0.5 , 1024 , 48.0.0.5 , -> + 10 , 0.530944 , 0 , 5 , 2 , 16.0.0.5 , 1024 , 48.0.0.5 , <- + 11 , 0.610000 , 0 , 6 , 1 , 16.0.0.6 , 1024 , 48.0.0.6 , -> + 12 , 0.630944 , 0 , 6 , 2 , 16.0.0.6 , 1024 , 48.0.0.6 , <- + 13 , 0.710000 , 0 , 7 , 1 , 16.0.0.7 , 1024 , 48.0.0.7 , -> + 14 , 0.730944 , 0 , 7 , 2 , 16.0.0.7 , 
1024 , 48.0.0.7 , <- + 15 , 0.810000 , 0 , 8 , 1 , 16.0.0.8 , 1024 , 48.0.0.8 , -> + 16 , 0.830944 , 0 , 8 , 2 , 16.0.0.8 , 1024 , 48.0.0.8 , <- + 17 , 0.910000 , 0 , 9 , 1 , 16.0.0.9 , 1024 , 48.0.0.9 , -> + 18 , 0.930944 , 0 , 9 , 2 , 16.0.0.9 , 1024 , 48.0.0.9 , <- + 19 , 1.010000 , 0 , a , 1 , 16.0.0.10 , 1024 , 48.0.0.10 , -> + 20 , 1.030944 , 0 , a , 2 , 16.0.0.10 , 1024 , 48.0.0.10 , <- +|================= + +In this example, the IPG was taken from the pcap file, which is closer to 20 msec and not 50 msec (taken from the configuration file). + +[source,python] +---- + #cap_ipg_min : 30 <1> + #cap_override_ipg : 200 <2> +---- +<1> Sets the minimum IPG (microseconds) which should be override : ( if (pkt_ipg Value to override (microseconds). + + +ifndef::backend-docbook[] ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ +endif::backend-docbook[] + + +=== DNS, Set one server ip + +In this example the server IP is taken from the template. + +[source,python] +---- +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.1.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_ipg : true + #cap_ipg_min : 30 + #cap_override_ipg : 200 + cap_info : + - name: cap2/dns.pcap + cps : 1.0 + ipg : 10000 + rtt : 10000 + server_addr : "48.0.0.7" <1> + one_app_server : true <2> + w : 1 +---- +<1> All templates will use the same server. +<2> Must be set to "true". + + +.dns ipg from pcap file +[format="csv",cols="1^,2^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.7 , -> + 2 , 0.030944 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.7 , <- + 3 , 2.010000 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.7 , -> + 4 , 2.030944 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.7 , <- + 5 , 3.010000 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.7 , -> + 6 , 3.030944 , 3 , 2 , 16.0.0.3 , 1024 , 48.0.0.7 , <- + 7 , 4.010000 , 4 , 1 , 16.0.0.4 , 1024 , 48.0.0.7 , -> + 8 , 4.030944 , 4 , 2 , 16.0.0.4 , 1024 , 48.0.0.7 , <- + 9 , 5.010000 , 5 , 1 , 16.0.0.5 , 1024 , 48.0.0.7 , -> + 10 , 5.030944 , 5 , 2 , 16.0.0.5 , 1024 , 48.0.0.7 , <- + 11 , 6.010000 , 6 , 1 , 16.0.0.6 , 1024 , 48.0.0.7 , -> + 12 , 6.030944 , 6 , 2 , 16.0.0.6 , 1024 , 48.0.0.7 , <- + 13 , 7.010000 , 7 , 1 , 16.0.0.7 , 1024 , 48.0.0.7 , -> + 14 , 7.030944 , 7 , 2 , 16.0.0.7 , 1024 , 48.0.0.7 , <- + 15 , 8.010000 , 8 , 1 , 16.0.0.8 , 1024 , 48.0.0.7 , -> + 16 , 8.030944 , 8 , 2 , 16.0.0.8 , 1024 , 48.0.0.7 , <- + 17 , 9.010000 , 9 , 1 , 16.0.0.9 , 1024 , 48.0.0.7 , -> + 18 , 9.030944 , 9 , 2 , 16.0.0.9 , 1024 , 48.0.0.7 , 
<- + 19 , 10.010000 , a , 1 , 16.0.0.10 , 1024 , 48.0.0.7 , -> + 20 , 10.030944 , a , 2 , 16.0.0.10 , 1024 , 48.0.0.7 , <- +|================= + + +=== DNS, Reduce the number of clients +//TBD: clarify + +[source,python] +---- +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" <1> + clients_end : "16.0.0.1" + servers_start : "48.0.0.1" + servers_end : "48.0.0.3" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_ipg : true + #cap_ipg_min : 30 + #cap_override_ipg : 200 + cap_info : + - name: cap2/dns.pcap + cps : 1.0 + ipg : 10000 + rtt : 10000 + w : 1 +---- +<1> Only one client. + + +.dns ipg from pcap file +[format="csv",cols="1^,2^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.030944 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 2.010000 , 2 , 1 , 16.0.0.1 , 1025 , 48.0.0.2 , -> + 4 , 2.030944 , 2 , 2 , 16.0.0.1 , 1025 , 48.0.0.2 , <- + 5 , 3.010000 , 3 , 1 , 16.0.0.1 , 1026 , 48.0.0.3 , -> + 6 , 3.030944 , 3 , 2 , 16.0.0.1 , 1026 , 48.0.0.3 , <- + 7 , 4.010000 , 4 , 1 , 16.0.0.1 , 1027 , 48.0.0.4 , -> + 8 , 4.030944 , 4 , 2 , 16.0.0.1 , 1027 , 48.0.0.4 , <- + 9 , 5.010000 , 5 , 1 , 16.0.0.1 , 1028 , 48.0.0.5 , -> + 10 , 5.030944 , 5 , 2 , 16.0.0.1 , 1028 , 48.0.0.5 , <- + 11 , 6.010000 , 6 , 1 , 16.0.0.1 , 1029 , 48.0.0.6 , -> + 12 , 6.030944 , 6 , 2 , 16.0.0.1 , 1029 , 48.0.0.6 , <- + 13 , 7.010000 , 7 , 1 , 16.0.0.1 , 1030 , 48.0.0.7 , -> + 14 , 7.030944 , 7 , 2 , 16.0.0.1 , 1030 , 48.0.0.7 , <- + 15 , 8.010000 , 8 , 1 , 16.0.0.1 , 1031 , 48.0.0.8 , -> + 16 , 8.030944 , 8 , 2 , 16.0.0.1 , 1031 , 48.0.0.8 , <- + 17 , 9.010000 , 9 , 1 , 16.0.0.1 , 1032 , 48.0.0.9 , -> + 18 , 9.030944 , 9 , 2 , 16.0.0.1 , 1032 , 48.0.0.9 , <- + 19 , 10.010000 , a , 1 , 16.0.0.1 , 1033 , 48.0.0.10 , -> + 
20 , 10.030944 , a , 2 , 16.0.0.1 , 1033 , 48.0.0.10 , <- +|================= + +In this case there is only one client so only ports are used to distinc the flows +you need to be sure that you have enogth free sockets when running TRex in high rates + +[source,python] +---- + Active-flows : 0 Clients : 1 <1> Socket-util : 0.0000 % <2> + Open-flows : 1 Servers : 254 Socket : 1 Socket/Clients : 0.0 + drop-rate : 0.00 bps +---- +<1> Number of clients +<2> sockets utilization (should be lowwer than 20%, elarge the number of clients in case of an issue). + +=== DNS, W=1 + +`w` is a tunable to the IP clients/servers generator. w=1 is the default behavior. +Setting `w=2` configures a burst of two allocations from the same client. See the following example. + +[source,python] +---- +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.10" + servers_start : "48.0.0.1" + servers_end : "48.0.0.3" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_ipg : true + #cap_ipg_min : 30 + #cap_override_ipg : 200 + cap_info : + - name: cap2/dns.pcap + cps : 1.0 + ipg : 10000 + rtt : 10000 + w : 2 <1> +---- +<1> Two clients will be allocated from the same template. 
+ + +.DNS ipg from pcap file +[format="csv",cols="1^,2^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.030944 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 2.010000 , 2 , 1 , 16.0.0.1 , 1025 , 48.0.0.1 , -> + 4 , 2.030944 , 2 , 2 , 16.0.0.1 , 1025 , 48.0.0.1 , <- + 5 , 3.010000 , 3 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 6 , 3.030944 , 3 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 7 , 4.010000 , 4 , 1 , 16.0.0.2 , 1025 , 48.0.0.2 , -> + 8 , 4.030944 , 4 , 2 , 16.0.0.2 , 1025 , 48.0.0.2 , <- + 9 , 5.010000 , 5 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 10 , 5.030944 , 5 , 2 , 16.0.0.3 , 1024 , 48.0.0.3 , <- + 11 , 6.010000 , 6 , 1 , 16.0.0.3 , 1025 , 48.0.0.3 , -> + 12 , 6.030944 , 6 , 2 , 16.0.0.3 , 1025 , 48.0.0.3 , <- + 13 , 7.010000 , 7 , 1 , 16.0.0.4 , 1024 , 48.0.0.4 , -> + 14 , 7.030944 , 7 , 2 , 16.0.0.4 , 1024 , 48.0.0.4 , <- + 15 , 8.010000 , 8 , 1 , 16.0.0.4 , 1025 , 48.0.0.4 , -> + 16 , 8.030944 , 8 , 2 , 16.0.0.4 , 1025 , 48.0.0.4 , <- + 17 , 9.010000 , 9 , 1 , 16.0.0.5 , 1024 , 48.0.0.5 , -> + 18 , 9.030944 , 9 , 2 , 16.0.0.5 , 1024 , 48.0.0.5 , <- + 19 , 10.010000 , a , 1 , 16.0.0.5 , 1025 , 48.0.0.5 , -> + 20 , 10.030944 , a , 2 , 16.0.0.5 , 1025 , 48.0.0.5 , <- +|================= + + +=== Mixing HTTP and DNS template + +The following example combines elements of HTTP and DNS templates: + + +[source,python] +---- +- duration : 1.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.10" + servers_start : "48.0.0.1" + servers_end : "48.0.0.3" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_ipg : true + cap_info : + - name: cap2/dns.pcap + cps : 10.0 <1> + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_http_browsing_0.pcap + cps : 2.0 <1> + ipg : 10000 + rtt : 10000 + 
w : 1 + +---- +<1> Same CPS for both templates. + +This creates the following output: + +.DNS ipg from pcap file +[format="csv",cols="1^,2^,1^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 0 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.030944 , 0 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 0.093333 , 1 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 4 , 0.104362 , 1 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 5 , 0.115385 , 1 , 2 , 3 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 6 , 0.115394 , 1 , 2 , 4 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 7 , 0.126471 , 1 , 2 , 5 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 8 , 0.126484 , 1 , 2 , 6 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 9 , 0.137530 , 1 , 2 , 7 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 10 , 0.148609 , 1 , 2 , 8 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 11 , 0.148621 , 1 , 2 , 9 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 12 , 0.148635 , 1 , 2 , 10 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 13 , 0.159663 , 1 , 2 , 11 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 14 , 0.170750 , 1 , 2 , 12 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 15 , 0.170762 , 1 , 2 , 13 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 16 , 0.170774 , 1 , 2 , 14 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 17 , 0.176667 , 0 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 18 , 0.181805 , 1 , 2 , 15 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 19 , 0.181815 , 1 , 2 , 16 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 20 , 0.192889 , 1 , 2 , 17 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 21 , 0.192902 , 1 , 2 , 18 , 16.0.0.2 , 1024 , 48.0.0.2 , <- +|================= + +Template_id:: + 0: DNS template + 1: HTTP template + + +ifndef::backend-docbook[] ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ +endif::backend-docbook[] + +The output above illustrates two HTTP flows and ten DNS flows in 1 second, as expected. + + +=== SFR traffic YAML + +SFR traffic includes a combination of traffic templates. This traffic mix in the example below was defined by SFR France. +This SFR traffic profile is used as our traffic profile for our ASR1k/ISR-G2 benchmark. It is also possible to use EMIX instead of IMIX traffic. + +The traffic was recorded from a Spirent C100 with a Pagent that introduce 10msec delay from client and server side. + +[source,python] +---- +- duration : 0.1 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.1.255" + servers_start : "48.0.0.1" + servers_end : "48.0.20.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x0,0x0,0x0,0x1,0x0,0x00] + cap_ipg : true + cap_info : + - name: avl/delay_10_http_get_0.pcap + cps : 404.52 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_http_post_0.pcap + cps : 404.52 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_https_0.pcap + cps : 130.8745 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_http_browsing_0.pcap + cps : 709.89 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_exchange_0.pcap + cps : 253.81 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_mail_pop_0.pcap + cps : 4.759 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_mail_pop_1.pcap + cps : 4.759 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_mail_pop_2.pcap + cps : 4.759 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_oracle_0.pcap + cps : 79.3178 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_rtp_160k_full.pcap + cps : 2.776 + ipg : 10000 + rtt : 10000 + w : 1 + one_app_server : false + plugin_id : 1 <2> + - name: avl/delay_10_rtp_250k_full.pcap + cps : 1.982 + ipg : 10000 + rtt : 10000 + w : 1 + one_app_server : 
false + plugin_id : 1 + - name: avl/delay_10_smtp_0.pcap + cps : 7.3369 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_smtp_1.pcap + cps : 7.3369 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_smtp_2.pcap + cps : 7.3369 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_video_call_0.pcap + cps : 11.8976 + ipg : 10000 + rtt : 10000 + w : 1 + one_app_server : false + - name: avl/delay_10_sip_video_call_full.pcap + cps : 29.347 + ipg : 10000 + rtt : 10000 + w : 1 + plugin_id : 2 <1> + one_app_server : false + - name: avl/delay_10_citrix_0.pcap + cps : 43.6248 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_dns_0.pcap + cps : 1975.015 + ipg : 10000 + rtt : 10000 + w : 1 + wlength : 1 +---- +<1> Plugin for SIP protocol, used to replace the IP/port in the control flow base on the data-flow. +//TBD: I'm placing your note into a TBD - (what are plugins should have a seperate chapter) +<2> Plugin for RTSP protocol used to replace the IP/port in the control flow base on the data-flow. + + + +ifndef::backend-docbook[] ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ +endif::backend-docbook[] + + +=== TRex command line + +TRex commands typically include the following main arguments, but only `-f` and `-d` are required. + +[source,bash] +---- +$.sudo /t-rex-64 -f [traffic_yaml] -m [muti] -d [duration] -l [Hz=1000] -c [cores] +---- + +*-f=TRAFIC_YAML_FILE*:: + YAML traffic configuration file. + +*-m=MUL*:: + Factor for bandwidth (multiplies the CPS of each template by this value). + +*-d=DURATION*:: + Duration of the test (sec). Default: 0 + +*-l=HZ*:: + Rate (Hz) for running the latency daemon. Example: -l 1000 runs 1000 pkt/sec from each interface. A value of zero (0) disables the latency check. + +*-c=CORES*:: + Number of cores. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. + + +The full reference can be found xref:cml-line[here] + +==== TRex command line examples + +.Simple HTTP 1Gb/sec for 100 sec +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/simple_http.yaml -c 4 -m 100 -d 100 +---- + +.Simple HTTP 1Gb/sec with latency for 100 sec +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/simple_http.yaml -c 4 -m 100 -d 100 -l 1000 +---- + +.SFR 35Gb/sec traffic +[source,bash] +---- +$.sudo /t-rex-64 -f avl/sfr_delay_10_1g.yaml -c 4 -m 35 -d 100 -p +---- + +.SFR 20Gb/sec traffic with latency +[source,bash] +---- +$.sudo /t-rex-64 -f avl/sfr_delay_10_1g.yaml -c 4 -m 20 -d 100 -l 1000 +---- + +.SFR ipv6 20Gb/sec traffic with latency +[source,bash] +---- +$.sudo /t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -m 20 -d 100 -l 1000 --ipv6 +---- + +.Simple HTTP 1Gb/sec with NAT translation support +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/simple_http.yaml -c 4 -m 100 -d 100 -l 1000 --learn +---- + +.IMIX 1G/sec ,1600 flows +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/imix_fast_1g.yaml -c 4 -m 1 -d 100 -l 1000 +---- + +.IMIX 1Gb/sec,100K flows +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/imix_fast_1g_100k.yaml -c 4 -m 1 -d 100 -l 1000 +---- + +.64bytes 
~1Gb/sec,1600 flows +[source,bash] +---- +$.sudo /t-rex-64 -f cap2/imix_64.yaml -c 4 -m 1 -d 100 -l 1000 +---- + +=== Traffic profiles provided with the TRex package + + +[options="header",cols="1,3",width="100%"] +|================= +| name | description +| cap2/dns.yaml | simple dns pcap file +| cap2/http_simple.yaml | simple http cap file +| avl/sfr_delay_10_1g_no_bundeling.yaml | sfr traffic profile capture from Avalanche - Spirent without bundeling support with RTT=10msec ( a delay machine), this can be used with --ipv6 and --learn mode +| avl/sfr_delay_10_1g.yaml | head-end sfr traffic profile capture from Avalanche - Spirent with bundeling support with RTT=10msec ( a delay machine), it is normalized to 1Gb/sec for m=1 +| avl/sfr_branch_profile_delay_10.yaml | branch sfr profile capture from Avalanche - Spirent with bundeling support with RTT=10msec it, is normalized to 1Gb/sec for m=1 +| cap2/imix_fast_1g.yaml | imix profile with 1600 flows normalized to 1Gb/sec. +| cap2/imix_fast_1g_100k_flows.yaml | imix profile with 100k flows normalized to 1Gb/sec. +| cap2/imix_64.yaml | 64byte UDP packets profile +|======================== + + +=== Stateless traffic generation + +With this feature you can "repeat" flows and create stateless, *IXIA* like streams. +After injecting the number of flows defined by `limit`, TRex repeats the same flows. If all template has a `limit` the CPS will be zero after a time as there are no new flows after the first iteration. + +*IMIX support:*:: +Example: +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/imix_64.yaml -d 1000 -m 40000 -c 4 -p +---- + +[WARNING] +===================================================================== +The *-p* is used here to send the client side packets from both interfaces. +(Normally it is sent only from client ports only.) +Typically, the traffic client side is sent from the TRex client port; with this option, the port is selected by the client IP. +All the flow packets are sent from the same interface. 
This may create an issue with routing, as the client's IP will be sent from the server interface. PBR router configuration solves this issue but cannot be used in all cases. So use this `-p` option carefully. +===================================================================== + + +.imix_64.yaml +[source,python] +---- + cap_info : + - name: cap2/udp_64B.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 1000 <1> +---- +<1> Repeats the flows in a loop, generating 1000 flows from this type. In this example, udp_64B includes only one packet. + +The cap file "cap2/udp_64B.pcap" includes only one packet of 64B. This configuration file creates 1000 flows that will be repeated as follows: +f1 , f2 , f3 .... f1000 , f1 , f2 ... +where the PPS == CPS for -m=1. In this case it will have PPS=1000 in sec for -m==1. +It is possible to mix stateless templates and stateful templates. + +.Imix YAML `cap2/imix_fast_1g.yaml` example +[source,python] +---- +- duration : 3 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.255.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x0,0x0,0x0,0x1,0x0,0x00] + cap_info : + - name: cap2/udp_64B.pcap + cps : 90615 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 + - name: cap2/udp_576B.pcap + cps : 64725 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 + - name: cap2/udp_1500B.pcap + cps : 12945 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 + - name: cap2/udp_64B.pcap + cps : 90615 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 + - name: cap2/udp_576B.pcap + cps : 64725 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 + - name: cap2/udp_1500B.pcap + cps : 12945 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 199 +---- +The templates are duplicate here to better utilize DRAM and to get better performance. 
+//TBD: What exactly repeates the templates - TRex, script, ? Also, how does that better utilize DRAM. + +.Imix YAML `cap2/imix_fast_1g_100k_flows.yaml` example +[source,python] +---- +- duration : 3 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.255.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x0,0x0,0x0,0x1,0x0,0x00] + cap_info : + - name: cap2/udp_64B.pcap + cps : 90615 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16666 + - name: cap2/udp_576B.pcap + cps : 64725 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16666 + - name: cap2/udp_1500B.pcap + cps : 12945 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16667 + - name: cap2/udp_64B.pcap + cps : 90615 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16667 + - name: cap2/udp_576B.pcap + cps : 64725 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16667 + - name: cap2/udp_1500B.pcap + cps : 12945 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 16667 +---- + +The following example of a simple simulation includes 3 flows, with CPS=10. 
+ +[source,python] +---- +$more cap2/imix_example.yaml +# +# Simple IMIX test (7x64B, 5x576B, 1x1500B) +# +- duration : 3 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.255.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x0,0x0,0x0,0x1,0x0,0x00] + cap_info : + - name: cap2/udp_64B.pcap + cps : 10.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 3 <1> + +---- +<1> Number of flows: 3 + + +[source,bash] +---- +./bp-sim-32-debug -f cap2/imix_example.yaml -o my.erf -v 3 > a.txt +---- + +.IMIX example limit=3 +[format="csv",cols="1^,2^,1^,1^,1^,2^,1^,2^,1^", options="header"] +|================= +pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc + 1 , 0.010000 , 0 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.210000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 3 , 0.310000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 4 , 0.310000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 5 , 0.510000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 6 , 0.610000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 7 , 0.610000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 8 , 0.810000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 9 , 0.910000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 10 , 0.910000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 11 , 1.110000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 12 , 1.210000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 13 , 1.210000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 14 , 1.410000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 15 , 1.510000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 16 , 1.510000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 17 , 1.710000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 18 , 1.810000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 19 , 1.810000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 
48.0.0.1 , -> + 20 , 2.010000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 21 , 2.110000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 22 , 2.110000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 23 , 2.310000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 24 , 2.410000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 25 , 2.410000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 26 , 2.610000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 27 , 2.710000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 28 , 2.710000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 29 , 2.910000 , 0 , 2 , 0 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 30 , 3.010000 , 0 , 3 , 0 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 31 , 3.010000 , 0 , 1 , 0 , 16.0.0.1 , 1024 , 48.0.0.1 , -> +|================= + +* Average CPS: 10 packets per second (30 packets in 3 sec). +* Total of 3 flows, as specified in the configuration file. +* The flows come in bursts, as specified in the configuration file. + + +ifndef::backend-docbook[] ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ +endif::backend-docbook[] + +=== Clients/Servers IP allocation scheme + +Currently, there is one global IP pool for clients and servers. It serves all templates. all the templates will allocate IP from this global pool. +Each TRex client/server "dual-port" (pair of ports, such as port 0 for client, port 1 for server) has it own mask offset taken from the YAML. The mask offset is called `dual_port_mask`. + +Example: + +[source,python] +---- +generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + dual_port_mask : "1.0.0.0" <1> + tcp_aging : 0 + udp_aging : 0 +---- +<1> Mask to add per dual-port pair. +The reason we introduce dual_port_mask is to make static route configurable. With this mask, different ports has different prefix. + +//TBD: needs clarification - this is the format of a port mask? + +With four ports, TRex produces the following output: + +[source,python] +---- + dual-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) + dual-1 (2,3) --> C (17.0.0.129-17.0.0.255 ) <-> S( 49.0.0.129 - 49.0.0.255) + mask ("1.0.0.0") +---- + +In the case of setting dual-port_mask as 0.0.0.0, both ports will use the same range of ip. +With four ports and dual_port_mask as 0.0.0.0, the ip range is : + +[source,python] +---- + dual-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) + dual-1 (2,3) --> C (16.0.0.129-16.0.0.255 ) <-> S( 48.0.0.129 - 48.0.0.255) +---- + +//TBD: not clear what the following 5 points are referring to. This looks like it should be a continuation of the footnotes for the example a few lines up. +- Number of clients : 255 +- Number of servers : 255 +- The mask defined by dual_port_mask (1.0.0.0) is added for each dual-port pair, but the total number of clients/servers from YAML will be constant and does not depend on the amount of dual ports. 
+- TCP/UDP aging is required when the number of clients is very small and the template defines a very long duration. +This is the time it takes to return the socket to the pool. +//TBD: not clear - is TCP/UDP aging an option used when the template defines a long duration? also, should specify what "very long" refers to. +- In the current version, the only option for distribution is "seq". + + +*Router configuration for this mode:*:: + +PBR is not necessary. The following configuration is sufficient. +//TBD: clarify + +[source,python] +---- +interface TenGigabitEthernet1/0/0 <1> + mac-address 0000.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 +! +` +interface TenGigabitEthernet1/1/0 <2> + mac-address 0000.0001.0000 + mtu 4000 + ip address 22.11.11.11 255.255.255.0 +! +interface TenGigabitEthernet1/2/0 <3> + mac-address 0000.0001.0000 + mtu 4000 + ip address 33.11.11.11 255.255.255.0 +! +interface TenGigabitEthernet1/3/0 <4> + mac-address 0000.0001.0000 + mtu 4000 + ip address 44.11.11.11 255.255.255.0 + load-interval 30 + + +ip route 16.0.0.0 255.0.0.0 22.11.11.12 +ip route 48.0.0.0 255.0.0.0 11.11.11.12 +ip route 17.0.0.0 255.0.0.0 44.11.11.12 +ip route 49.0.0.0 255.0.0.0 33.11.11.12 +---- +<1> Connected to TRex port 0 (client side) +<2> Connected to TRex port 1 (server side) +<3> Connected to TRex port 2 (client side) +<4> Connected to TRex port 3(server side) + +*One server:*:: + +To support a template with one server, you can add a new YAML server_addr ip. Each dual-port pair will be assigned a separate server (in compliance with the mask). +//TBD: clarify + +[source,python] +---- +- name: cap2/dns.pcap + cps : 1.0 + ipg : 10000 + rtt : 10000 + w : 1 + server_addr : "48.0.0.1" <1> + one_app_server : true <2> + wlength : 1 +---- +<1> Server IPv4 address. +<2> Enable one server mode. + +*w/wlength:*:: +//TBD: looks like this should be a continuation of the footnotes as in 1 and 2 above. + +not require to configure them, user 1 +//TBD: ? 
+ +*new statistic:*:: + +[source,python] +---- + Active-flows : 19509 Clients : 504 Socket-util : 0.0670 % + Open-flows : 247395 Servers : 65408 Socket : 21277 Socket/Clients : 42.2 +---- + + +[NOTE] +===================================================================== +* No backward compatibility with the old generator YAML format. +* When using -p option, TRex will not comply with the static route rules. Server-side traffic may be sent from the client side (port 0) and vice-versa. Use the -p option only with PBR configuration when the router, switch p1<->p2. +//TBD: "when router..." unclear +* VLAN (sub interface feature) does not comply with static route rules. Use it only with PBR. + VLAN0 <-> VALN1 per interface + vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } +* Limitation: When using a template with plugins (bundles), the number of servers must be higher than the number of clients. +===================================================================== + +==== More Details about IP allocations + +Each time a new flow is creaed, TRex allocates a new Client IP/port and Server IP. This 3-tuple should be distinct among active flows. + +Currently, only sequcency distribution is supported in IP allocation. That means the IP address is increased one by one. + +Let's say if we have 2 candidate IPs in the pool: 16.0.0.1 and 16.0.0.2. So the sequence of allocated clients should be something like this: + +[source,python] +---- +16.0.0.0.1 [1024] +16.0.0.0.2 [1024] +16.0.0.0.1 [1025] +16.0.0.0.2 [1025] +---- + +==== How to decide the PPS and BPS + +- Example of one flow with 4 packets +- Green are first packet of flow +- Lets say the client ip pool starts from 16.0.0.1, and the distribution is seq. + +image:images/ip_allocation.png[title="rigt"] + +latexmath:[$Total PPS = \sum_{k=0}^{n}(CPS_{k}\times {flow\_pkts}_{k})$] + +latexmath:[$Concurrent flow = \sum_{k=0}^{n}CPS_{k}\times flow\_duration_k $] + + +The above fomulars can be used to calculate the PPS. 
The TRex throughput depends on the PPS calculated above and the value of m (a multiplier assigned by TRex cli). + +The m value is a multiplier of total pcap files CPS. +CPS of pcap file is configured on yaml file. + +Let's take a simple example as below. + + +[source,python] +---- +cap_info : + - name: avl/first.pcap < -- has 2 packets + cps : 102.0 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/second.pcap < -- has 20 packets + cps : 50.0 + ipg : 10000 + rtt : 10000 + w : 1 +---- + +The throughput is: 'm*(CPS_1*flow_pkts+CPS_2*flow_pkts)' + +So if the m is set as 1, the total PPS is : 102*2+50*20 = 1204 PPS. + +The BPS depends on the packet size. You can refer to your packet size and get the BPS = PPS*Packet_size. + +==== Roadmap of Client/Server IP allocation + +We have several features under development for IP allocation. + +- *1) per-template generator* + +Multiple generators can be defined and assigned to different pcap file templates. + +The YAML configuration is something like this: + + +[source,python] +---- + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.1.255" + servers_start : "48.0.0.1" + servers_end : "48.0.20.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + generator_clients : + - name : "c1" + distribution : "random" + ip_start : "38.0.0.1" + ip_end : "38.0.1.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + generator_servers : + - name : "s1" + distribution : "seq" + ip_start : "58.0.0.1" + ip_end : "58.0.1.255" + dual_port_mask : "1.0.0.0 + cap_info : + - name: avl/delay_10_http_get_0.pcap + cps : 404.52 + ipg : 10000 + rtt : 10000 + w : 1 + - name: avl/delay_10_http_post_0.pcap + client_pool : "c1" + server_pool : "s1" + cps : 404.52 + ipg : 10000 + rtt : 10000 + w : 1 +---- + +- *2) More distributions will be supported (normal distribution, random distribution, etc)* + +Currently, 
only sequential distribution is supported.
guide +:revdate: 2014-11-01 +:revnumber: 0.1 +:deckjs_theme: swiss +:deckjs_transition: horizontal-slide +:scrollable: + + +++++++++++++++++++ + + + + + + + + + + + + + +++++++++++++++++++ + + +== Simple configuration + +* TRex does not implement ARP emulation +* This guide will help you to configure Cisco ASR1K to work with TRex +* TRex is directly connected to ASR1K ports. + +image::images/TrexConfig.png[title="TRex/Router setup"] +. TRex port 0 - clients side +. Router TenG 0/0/0 +. Router TenG 0/0/1 +. TRex port 1 - servers side + + +== TRex configuration + +* TRex act as both client and server side +* TRex port mac addrees should configure correctly, so packet generated from port 1 will get to 2 and vice-versa +* To use the config file you can add this switch `--cfg [file]` +* Or edit the configuration file in `/etc/trex_cfg.yaml` + +[source,python] +---- + - port_limit : 2 + port_info : # set eh mac addr + - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] <1> + src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] <2> + - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] <3> + src_mac : [0x4,0x0,0x0,0x4,0x0,0x00] <4> +---- +<1> Correspond to TRex port 0 - should be Router TenG 0/0/0 mac-address +<2> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees +<3> Correspond to TRex port 0 - should be Router TenG 0/0/1 mac-address +<4> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees + + +== Router configuration PBR part 1 + +* Router moves packets from port 0->1 and 1->0 without looking into IP addrees. + +* TenG 0/0/0 <-> TenG 0/0/1 + +*Router configuration:*:: + +[source,python] +---- +interface TenGigabitEthernet0/0/0 + mac-address 0100.0001.0000 <1> + mtu 4000 <2> + ip address 11.11.11.11 255.255.255.0 <3> + ip policy route-map p1_to_p2 <4> + load-interval 30 +! + +interface TenGigabitEthernet0/0/1 + mac-address 0300.0003.0000 <5> + mtu 4000 + ip address 12.11.11.11 255.255.255.0 + ip policy route-map p2_to_p1 + load-interval 30 +! 
+---- +<1> Configure mac-addrees to match TRex destination port-0 +<2> Set MTU +<3> Set an ip addrees ( routing can't work without this) +<4> Configure PBR policy - see next slide +<5> Configure mac-addrees to match TRex destination port-1 + +== Router configuration PBR part 2 + +[source,python] +---- + +route-map p1_to_p2 permit 10 + set ip next-hop 12.11.11.12 <1> +! +route-map p2_to_p1 permit 10 + set ip next-hop 11.11.11.12 <2> + +---- + +<1> Set the destination packet to be 12.11.11.12 which correspond to TenG 0/0/1 +<2> Set the destination packet to be 11.11.11.12 which correspond to TenG 0/0/0 + + +== Router configuration PBR part 3 + +* What about destination mac-address it should be TRex source mac-addrees? +* The folowing configuration addrees it + +[source,python] +---- + arp 11.11.11.12 0200.0002.0000 ARPA <1> + arp 12.11.11.12 0400.0004.0000 ARPA <2> +---- +<1> Destination mac-addrees of packets sent from If 0/0/0 is matched to TRex source mac-address port-0 +<2> Destination mac-addrees of packets sent from If 0/0/1 is matched to TRex source mac-address port-1 + +== Static-route configuration - TRex + +* You can set static range of IPs for client and server side + +[source,python] +---- +generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.240" + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 +---- + +* In this example, you should expect: +** Number of clients 255 +** Number of servers 240 + +== Static-route configuration - Router + +[source,python] +---- +interface TenGigabitEthernet0/0/0 + mac-address 0100.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 +! +` +interface TenGigabitEthernet0/0/1 + mac-address 0300.0003.0000 + mtu 4000 + ip address 22.11.11.11 255.255.255.0 +! 
+ip route 16.0.0.0 255.0.0.0 22.11.11.12 <1> +ip route 48.0.0.0 255.0.0.0 11.11.11.12 <2> +---- +<1> Match the range of TRex YAML ( client side 0/0/0 ) +<2> Match the range of TRex YAML ( server side 0/0/1) + +== Verify configuration + +* To verify that TRex port-0 is connected to Router 0/0/0 and not 0/0/1 run + +........................................... +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 1 +........................................... +* It sends packets only from TRex port-0 ( `--lm 1` ) + + +* to send only from TRex port 1 do this: +........................................... +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 2 +........................................... + + +== Static-route configuration - IPV6 + +[source,python] +---- +interface TenGigabitEthernet1/0/0 + mac-address 0000.0001.0000 + mtu 4000 + ip address 11.11.11.11 255.255.255.0 + ip policy route-map p1_to_p2 + load-interval 30 + ipv6 enable #<1> + ipv6 address 2001:DB8:1111:2222::1/64 #<2> + ipv6 policy route-map ipv6_p1_to_p2 #<3> +! + + +ipv6 unicast-routing #<4> + +ipv6 neighbor 3001::2 TenGigabitEthernet0/1/0 0000.0002.0002 #<5> +ipv6 neighbor 2001::2 TenGigabitEthernet0/0/0 0000.0003.0002 + +route-map ipv6_p1_to_p2 permit 10 #<6> + set ipv6 next-hop 2001::2 +! +route-map ipv6_p2_to_p1 permit 10 + set ipv6 next-hop 3001::2 +! 
+ + +csi-mcp-asr1k-40(config)#ipv6 route 4000::/64 2001::2 +csi-mcp-asr1k-40(config)#ipv6 route 5000::/64 3001::2 +---- +<1> Enable ipv6 +<2> Add ipv6 addrees +<3> Add pbr +<4> Enable ipv6 routing +<5> Mac-addr setting should be like T-Rex +<6> PBR configuraion + + + + + + diff --git a/trex_control_plane_design_phase1.asciidoc b/trex_control_plane_design_phase1.asciidoc new file mode 100755 index 00000000..cbd02670 --- /dev/null +++ b/trex_control_plane_design_phase1.asciidoc @@ -0,0 +1,516 @@ +T-Rex Control Plane Design - Phase 1 +==================================== +:author: Dan Klein +:email: +:revnumber: 1.0 +:quotes.++: +:numbered: + + +== Introduction + +=== T-Rex traffic generator + +T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. +This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. + +=== T-Rex Control Plane + +T-Rex control (phase 1) is the base API, based on which any future API will be developed. + +This document will describe the current control plane for T-Rex, and its scalable features as a directive for future developement. + +==== T-Rex Control Plane - Architecture and Deployment notes + +T-Rex control plane is based on a JSON RPC transactions between clients and server. + +Each T-Rex machine will have a server running on it, closely interacting with T-Rex (clients do not approach T-Rex directly). + +The server version (which runs as either a daemon or a CLI application) is deployed with T-Rex latest version, written in Python 2.7. +As future feature, and as multiple T-Rexes might run on the same machine, single server shall serve all T-Rexes running a machine. + +The control plane implementation is using the currently dumped data messaging from T-Rex's core via ZMQ publisher, running from core #1. 
The server is used as a subscriber for this data, manipulating the packets and re-encoding them into JSON-RPC format for client use.
**Live monitor**: this will run the server with live logging on the screen. To launch the server in this mode run `server/trex_server.py` file directly. + + 2. **Daemon application**: this will run the server as a background daemon process, and all logging will be saved into file, located at `/var/log/trex/` path. + + This is the common scenario, during which nothing is prompted into the screen, unless in case of error in server launching. + +==== Launching the server + +The server would run only on valid T-Rex machines or VM, due to delicate customization in used sub-modules, designed to eliminate the situation in which control and data plane packets are mixed. + +The server code is deployed by default with T-Rex (starting version 1.63 ) and can be launched from its path using the following command: + +`./trex_daemon_server [RUN_COMMAND] [options]` + +[NOTE] +The [RUN_COMMAND] is used only when server launched as a daemon application. + +Running this command with `--help` option will prompt the help menu, explaning all the available options. + +===== Daemon commands + +The following daemon commands are supported: + + 1. **`start`**: This option starts the daemon application of T-Rex server, using the following command options (detailed exmplanation on this next time). + + 2. **`stop`**: Stop the daemon application. + + 3. **`restart`**: Stop the current daemon proccess, then relaunch it with the provided parameters (the parameters must be re-entered). + + 3. **`show`**: Prompt whether the daemon is running or not. + +WARNING: restarting the daemon application will **truncate** the logfile. + +===== Server options commands + +The following describes the options for server launching, and applies to both daemon and live launching. 
+ +Let's have a look on the help menu: + +---- +[root@trex-dan Server]# ./trex_daemon_server --help +[root@trex-dan Server]# usage: trex_deamon_server {start|stop|restart} [options] + + NOTE: start/stop/restart options only available when running in daemon mode + +Run server application for T-Rex traffic generator + +optional arguments: + -h, --help show this help message and exit + -p PORT, --daemon-port PORT + Select port on which the daemon runs. Default port is + 8090. + -z PORT, --zmq-port PORT + Select port on which the ZMQ module listens to T-Rex. + Default port is 4500. #<2> + -t PATH, --trex-path PATH + Specify the compiled T-Rex directory from which T-Rex + would run. Default path is: / #<1> + +[root@trex-dan Server]# +---- + +<1> Default path might change when launching the server in daemon or live mode. + +<2> ZMQ port must match the defined port of the platform, generally found at `/etc/trex_cfg.yaml`. + +The available options are: + + 1. **`-p, --daemon-port`**: set the port on which the server is listening to clients requests. + + Default listening server port is **`8090`**. + + 2. **`-z, --zmq-port`**: set the port on which the server is listening to zmq publication from T-Rex. + + Default listening server port is **`4500`**. + + 3. **`-t, --trex-path`**: set the path from which T-Rex is runned. This is especially helpful when more than one version of T-Rex is used or switched between. Although this field has default value, it is highly recommended to set it manually with each server launch. + +[NOTE] +When server is launched is first makes sure the trex-path is valid: the path 'exists' and granted with 'execution permissions.' If any of the conditions is not valid, the server will not launch. + + +=== The Client module + +The client is a Python based application that created `TRexClient` instances. + +Using class methods, the client interacts with T-Rex server, and enable it to perform the following commands: + + 1. 
Start T-Rex run (custom parameters supported). + + 2. Stop T-Rex run. + + 3. Check what is the T-Rex status (possible states: `Idle, Starting, Running`). + + 4. Poll (by customize sampling) the server and get live results from T-Rex **while still running**. + + 5. Get custom T-Rex stats based on a window of saved history of latest 'N' polling results. + +The clients is also based on Python 2.7, however unlike ther server, it can run on any machine who wishes to. + +In fact, the client side is simply a python library that interact with the werver using JSON-RPC (v2), hence if needed, anyone can write a library on any other language that will interact with the server ins the very same way. + + +==== `CTRexClient` module initialization + +As explained, `CTRexClient` is the main module to use when writing an T-Rex test-plan. + +This module holds the entire interaction with T-Rex server, and result contiaing via `result_obj`, which is an instance of `CTRexResult` class. + +The `CTRexClient` instance is initialized in the following way: + + 1. **T-Rex hostname**: represents the hostname on which the server is listening. Both hostname or IPv4 address will be a valid input. + + 2. **Server port**: the port on which the server listens to incoming client requests. This parameter value must be identical to `port` option configured in the server. + + 3. **History size**: The number of saved T-Rex samples. Based on this "window", some extra statistics and data are calculated. Default history size is 100 samples. + + 4. **verbose **: This boolean option will prompt extended output, if available, of each of the activated methods. For any method that interacts with T-Rex server, this will prompt the JSON-RPC request and response. + + This option is especially useful for developers who wishes to imitate the functionality of this client using other programming languages. + +**That's it!** + +Once these parameter has been passed, you're ready to interact woth T-Rex. 
+ +[NOTE] +The most common initialization will simply use the hostname, such that common initilization lookes like: + +`trex = CTRexClient('trex_host_name')` + +==== `CTRexClient` module usage + +This section covers with great detail the usage of the client module. Each of the methods describes are class methods of `CTRexClient`. + + - **`start_trex (f, d, block_to_success, timeout, trex_cmd_options)`** + + Issue a request to start T-Rex with certain configuration. The server will only handle the request if the T-Rex is in `Idle` status. + + Once the status has been confirmed, T-Rex server will issue for this single client a token, so that only that client may abort running T-Rex session. + + `f` and `d` parameters are mandatory, as they are crutial parameter in setting T-Rex bahviour. Also, `d` parameter must be at least 30 seconds or larger. + By default (and by design) this method **blocks** until T-Rex status changes to either 'Running' or back to 'Idle'. + + - **`stop_trex()`** + + If (and only if) a certain client issued a run requested (and it accepted), this client may use this command to abort current run. + + This option is very useful especially when the real-time data from the T-Rex are utilized. + + - **`wait_until_kickoff_finish(timeout = 40)`** + + This method blocks until T-Rex T-Rex status changes to 'Running'. In case of error an exception will be thrown. + + The 'timeout' parameter sets the maximum waiting time. + + This method is especially useful when `block_to_success` was set to false in order to utilize the time to configure other things, such as DUT. + + - **`is_running(dump_out = False)`** + + Checks if there's currently T-Rex session up (with any client). + + If T-Rex is running, this method returns `True` and the result object id updated accordingly. + + If not running, return `False`. + + If a dictionary pointer is given in `dump_out` argument, the pointer object is cleared and the latest dump stored in it. 
+ + - **`get_running_status()`** + + Fetches the current T-Rex status. + + Three possible states + + * `Idle` - No T-Rex session is currently running. + + * `Starting` - A T-Rex session just started (turns into Running after stability condition is reached) + + * `Running` - T-Rex session is currently active. + + The following diagram describes the state machine of T-Rex: + +ifdef::backend-docbook[] +image::images/trex_control_plane_state_machine.png[title="T-Rex running state machine",align="center",width=280, link="images/trex_control_plane_state_machine.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/trex_control_plane_state_machine.png[title="T-Rex running state machine",align="center",width=400, link="images/trex_control_plane_state_machine.png"] +endif::backend-xhtml11[] + + - **`get_running_info()`** + + This method performs single poll of T-Rex running data and process it into the result object (named `result_obj`). + + The method returns the most updated data dump from T-Rex in the form of Python dictionary. + + + + Behind the scenes, running that method will trigger inner-client process over the saved window, and produce window-relevant inforamtion, as well as get the most important data moe accessible. + + Once the data has been fetched (at sample rate the satisfies the user), a custom data manipulation can be done in various forms and techniques footnote:[See `CTRexResult` module usage for more details]. + + **Note: ** the sampling rate is bounded from buttom to 2 samples/sec. + + - **`sample_until_condition(condition_func, time_between_samples = 5)`** + + This method automatically sets ongoing sampling of T-Rex data, with sampling rate described by `time_between_samples`. On each fetched dump, the `condition_func` is applied on the result objects, and if returns `True`, the sampling will stop. + + On success (condition has been met), this method returns the latest result object that satisfied the given condition. 
On failure, this method will raise a `UserWarning` exception.
+ + - **`get_value_list (tree_path_to_key, regex = None)`** + + Fetch, out of all data dumps stored in history a value. + + - **History data access API** + + Since (as metioned earlier) the data dump is a JSON-RPC string, which is decoded into Python dictionaries and lists, nested within each other. + + This "Mini API" is used by both `get_last_value` and `get_value_list` methods, and receives in both cases two arguments: `tree_path_to_key, regex` footnote:[By default, `regex` argument is set to None]. + + The user may choose whatever value he wishes to extract, using the `tree_path_to_key` argument. + + * In order to get deeper and deeper on the hierarchy, use the key of the dictionary, separated by dot (‘'.'’) for each level. + + In order to fetch more than one key in a certain dictionary (no matter how deep it is nested), use the `regex` argument to state which keys are to be included. + Example: In order to fetch only the `expected_tx` key values of the latest dump, we'll call: *`get_last_value("trex-global.data", "m_tx_expected_\w+")`* + + This will produce the following dictionary result: + + *`{'m_tx_expected_pps': 21513.6, 'm_tx_expected_bps': 100416760.0, 'm_tx_expected_cps': 412.3}`* + + We can see that the result is every key-value pair, found at the relevant tree-path and matches the provided regex. + + * In order to access an array element, specifying the `key_to_array[i]`, where `i` is the desired array index. + + Example: In order to access the third element of the data array of: + + `{“template_info” : {"name":"template_info","type":0,"data":["avl/delay_10_http_get_0.pcap","avl/delay_10_http_post_0.pcap",` *`"avl/delay_10_https_0.pcap"`* `,"avl/delay_10_http_browsing_0.pcap", "avl/delay_10_exchange_0.pcap","avl/delay_10_mail_pop_0.pcap","avl/delay_10_mail_pop_1.pcap","avl/delay_10_mail_pop_2.pcap","avl/delay_10_oracle_0.pcap"]}` + + we'll use the following command: `get_last_value("template_info.data[2]”)`. 
+ + This will produce the following result: + + *`avl/delay_10_https_0.pcap`* + + + +== Usage Examples + +=== Exmaple #1: Checking T-Rex status and Launching T-Rex + +The following program checks T-Rex status, and later on launches it, querying its status along different time slots. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, T-Rex status is: ", trex.is_running() # <1> +print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> + m = 0.1, + d = 40, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <4> + +print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <5> +---- + +<1> `is_running()` returns a boolean and checks if T-Rex is running or not. + +<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. + +<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'd' are mandatory). + +<4> Going to sleep for few seconds, allowing T-Rex to start. + +<5> Checking out with T-Rex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the T-Rex machine. + +---- +Connecting to T-Rex @ http://trex-dan:8090/ ... +Before Running, T-Rex status is: False +Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} + <1> <1> + +After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} + <1> <1> +Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + <1> <1> +---- + +<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. 
+ + +=== Exmaple #2: Checking T-Rex status and Launching T-Rex with 'BAD PARAMETERS' + +The following program checks T-Rex status, and later on launches it with wrong input ('mdf' is not legal option), hence T-Rex run will not start and a message will be available. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, T-Rex status is: ", trex.is_running() # <1> +print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> +#<4> mdf = 0.1, + d = 40, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <5> + +print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <6> +---- + +<1> `is_running()` returns a boolean and checks if T-Rex is running or not. + +<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. + +<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). + +<4> Wrong parameter ('mdf') injected. + +<5> Going to sleep for few seconds, allowing T-Rex to start. + +<6> Checking out with T-Rex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the T-Rex machine. +---- +Connecting to T-Rex @ http://trex-dan:8090/ ... +Before Running, T-Rex status is: False +Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} + <1> <1> + +After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} + <1> <1> +Is T-Rex running? 
False {u'state': , u'verbose': u'T-Rex run failed due to wrong input parameters, or due to reachability issues.'} + <2> <2> +---- + +<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. + +<2> After T-Rex launching failed, a message indicating the failure reason. However, T-Rex is back Idle, ready to handle another launching request. + + +=== Example #3: Launching T-Rex, let it run until custom condition is satisfied + +The following program will launch T-Rex, and poll its result data until the custom condition function returns `True`. + In this case, the condition function is simply named `condition`. + +Once the condition is met, T-Rex run will be terminated. + +[source, python] +---- +print "Before Running, T-Rex status is: ", trex.get_running_status() + + print "Starting T-Rex..." + ret = trex.start_trex( c = 2, + mdf = 0.1, + d = 1000, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + + def condition (result_obj): #<1> + return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000 + + res = trex.sample_until_condition(condition) #<2> + + print res #<3> + val_list = res.get_value_list("trex-global.data", "m_tx_expected_\w+") #<4> +---- + +<1> The `condition` function defines when to stop T-Rex. In this case, when T-Rex's current tx (in pps) exceeds 200000. + +<2> The condition is passed to the `sample_until_condition` method, which will block until either the condition is met or an 'Exception' is raised. + +<3> Once satisfied, the `res` variable holds the first result object on which the condition was satisfied. At this point, T-Rex status is 'Idle' and another run can be requested from the server. + +<4> Further custom processing can be made on the result object, regardless of other T-Rex runs. 
+ +<<< + +=== Example #4: Launching T-Rex, monitor live data and stopping on demand + +The following program will launch T-Rex, and while it runs poll the server (every 5 seconds) for running information, such as latency, drops, and other extractable parameters. + +Then, after some criterion has been met, T-Rex execution is terminated, enabling others to use the resource instead of waiting for the entire execution to finish. + +[source, python] +---- +print "Before Running, T-Rex status is: ", trex.get_running_status() + + print "Starting T-Rex..." + ret = trex.start_trex( c = 2, + mdf = 0.1, + d = 100, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + + last_res = dict() + while trex.is_running(dump_out = last_res): #<1> + print '\n\n*****************************************' + print "RECEIVED DUMP:" + print last_res, "\n\n\n" + + print "CURRENT RESULT OBJECT" + obj = trex.get_result_obj() + #<2> # Custom data processing is done here, for example: + print obj.get_value_list("trex-global.data.m_tx_bps") + time.sleep(5) #<3> + + print "Terminating T-Rex..." + ret = trex.stop_trex() #<4> +---- + +<1> Iterate as long as T-Rex is running. + + In this case the latest dump is also saved into the `last_res` variable, so easier access for that data is available, although not needed most of the time. + +<2> Data processing. This is fully customizable for the relevant test initiated. + +<3> The sampling rate is flexible and can be configured depending on the desired output. + +<4> T-Rex termination. + +<<< + +=== Example #5: Launching T-Rex, let it run until finished + +The following program will launch T-Rex, and poll it automatically until the run finishes. The polling rate is customisable (in this case, every 10 seconds) using the `time_between_samples` argument. + +[source, python] +---- +print "Before Running, T-Rex status is: ", trex.get_running_status() + + print "Starting T-Rex..." 
+ ret = trex.start_trex( c = 2, #<1> + mdf = 0.1, + d = 1000, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + + res = trex.sample_to_run_finish(time_between_samples = 10) #<2> + + print res #<3> + val_list = res.get_value_list("trex-global.data", "m_tx_expected_\w+") #<4> +---- + +<1> T-Rex run initialization. + +<2> Define the sample rate and block until T-Rex run ends. Once this method returns (assuming no error), T-Rex result object will contain the samples collected allong T-Rex run, limited to the history size footnoteref:[For example, For example for history sized 100 only the latest 100 samples will be available despite sampling more than that during T-Rex run.]. + +<3> Once finished, `res` variable holds the latest result object. + +<4> Further custom processing can be made on the result object, regardless of other T-Rex runs. \ No newline at end of file diff --git a/trex_control_plane_peek.asciidoc b/trex_control_plane_peek.asciidoc new file mode 100755 index 00000000..530da965 --- /dev/null +++ b/trex_control_plane_peek.asciidoc @@ -0,0 +1,225 @@ +T-Rex Control Plane Design - Phase 1 peek +========================================= +:author: Dan Klein +:email: +:revnumber: 1.0 +:quotes.++: +:numbered: + + + +=== T-Rex traffic generator + +T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. +This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. + +=== T-Rex Control + +T-Rex control plane is under developement, and a phase 1 is planned to be published soon (Apr 2015). + +This document will shortly describe the planned control plane for T-Rex, which is planned to be more scalable and support automation more intuitively. + +==== T-Rex Control Plane - High Level + +T-Rex control plane is based on a JSON RPC transactions between clients and server. 
+ +Each T-Rex machine will have a server running on it, closely interacting with T-Rex (clients do not approach T-Rex directly). + +As future feature, and as multiple T-Rexes might run on the same machine, single server shall serve all T-Rexes running a machine. + +The client is a Python based application that created `TRexClient` instances. + +Using class methods, the client interacts with T-Rex server, and enable it to perform the following commands: + + 1. Start T-Rex run (custom parameters supported). + + 2. Stop T-Rex run. + + 3. Check what is the T-Rex status (possible states: `Idle, Starting, Running`). + + 4. Poll (by customize sampling) the server and get live results from T-Rex **while still running**. + + 5. Get custom T-Rex stats based on a window of saved history of latest 'N' polling results. + + +==== T-Rex Control Plane - Example crumbs + + + + - **Exmaple #1: Checking T-Rex status and Launching T-Rex** + The following program checks T-Rex status, and later on launches it, querying its status along different time slots. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, T-Rex status is: ", trex.is_running() # <1> +print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> + m = 0.1, + d = 20, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <4> + +print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <5> +---- + +<1> `is_running()` returns a boolean and checks if T-Rex is running or not. + +<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. + +<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). + +<4> Going to sleep for few seconds, allowing T-Rex to start. 
+ +<5> Checking out with T-Rex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the T-Rex machine. + +---- +Connecting to T-Rex @ http://trex-dan:8090/ ... +Before Running, T-Rex status is: False +Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} + <1> <1> + +After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} + <1> <1> +Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + <1> <1> +---- + +<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. + + + * **Exmaple #2: Checking T-Rex status and Launching T-Rex with 'BAD PARAMETERS'** + The following program checks T-Rex status, and later on launches it with wrong input ('mdf' is not legal option), hence T-Rex run will not start and a message will be available. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, T-Rex status is: ", trex.is_running() # <1> +print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> +#<4> mdf = 0.1, + d = 20, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <5> + +print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <6> +---- + +<1> `is_running()` returns a boolean and checks if T-Rex is running or not. + +<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. + +<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). + +<4> Wrong parameter ('mdf') injected. + +<5> Going to sleep for few seconds, allowing T-Rex to start. 
+ +<6> Checking out with T-Rex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the T-Rex machine. +---- +Connecting to T-Rex @ http://trex-dan:8090/ ... +Before Running, T-Rex status is: False +Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} + <1> <1> + +After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} + <1> <1> +Is T-Rex running? False {u'state': , u'verbose': u'T-Rex run failed due to wrong input parameters, or due to reachability issues.'} + <2> <2> +---- + +<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. + +<2> After T-Rex lanuching failed, a message indicating the failure reason. However, T-Rex is back Idle, ready to handle another launching request. + + + * **Exmaple #3: Launching T-Rex, monitor live data and stopping on demand** + The following program will launch T-Rex, and while it runs poll the server (every 5 seconds) for running inforamtion, such as latency, drops, and other extractable parameters. + + Then, after some criteria was met, T-Rex execution is terminated, enabeling others to use the resource instead of waiting for the entire execution to finish. + +[source, python] +---- +print "Before Running, T-Rex status is: ", trex.get_running_status() + + print "Starting T-Rex..." + ret = trex.start_trex( c = 2, + mdf = 0.1, + d = 100, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + + print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() + print "sleeping 20 secs.." + time.sleep(20) + for i in range(5): + print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() #<1> + #<2> received_info = trex.get_running_info() + #<3> # Custom data processing is done here + #<4> time.sleep(5) + + print "Terminating T-Rex..." 
+ #<5> ret = trex.stop_trex() + print "After stopping, T-Rex status is: ", trex.is_running(), trex.get_running_status() #<6> +---- + +<1> Running queries is still optional, although not mandatory in order to get stats. + +<2> `get_running_info()` will return the latest data dump available from T-Rex. + + Some aditional data manipulation and queries are under developement, including manipulation over number of dumps, which is useful for avoiding "spikes" of singular behavior. + +<3> Data processing. This is fully customizable for the relevant test initiated. + +<4> The sampling rate is flexibale and can be configured depending on the desired output. + +<5> T-Rex termination. + +<6> Post-termination check for status. + + +This code will prompt the following output, assuming a server was launched on the T-Rex machine. +---- +Connecting to T-Rex @ http://trex-dan:8090/ ... +Before Running, T-Rex status is: False +Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} +Starting T-Rex... +After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} + +<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + +<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + +<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + +<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + +<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} + +Before terminating, T-Rex status is: True {u'state': , u'verbose': u'T-Rex is Running'} +Terminating T-Rex... +#<2> After stopping, T-Rex status is: False {u'state': , u'verbose': u'T-Rex finished (terminated).'} + +---- + +<1> Polling T-Rex status while in a data polling loop. 
+ +<2> After termination, we can see that T-Rex is back idle, also the `verbose` field shows the stop reason \ No newline at end of file diff --git a/trex_preso.asciidoc b/trex_preso.asciidoc new file mode 100755 index 00000000..50f4ffc7 --- /dev/null +++ b/trex_preso.asciidoc @@ -0,0 +1,1312 @@ +TRex realistic traffic generator +================================ +:author: hhaim +:email: +:description: TRex Low cost, High scale, realistic traffic generator +:revdate: 2014-11-01 +:revnumber: 0.2 +:deckjs_theme: swiss +:deckjs_transition: horizontal-slide +:scrollable: +:web_server_url: http://trex-tgn.cisco.com/trex + + + +== What problem is being solved? + +* Network elements include complex statful features +* Require testing with statful and real traffic mix +* Traffic generators of statful/realistic traffic are: +** Expensive ~$100-500K +** Not scalable for high rates +** Not flexible +* Due to cost, quality is impacted +** Limited access / testing +** Late testing +** No standard + +++++++++++++++++++ + + + + + + + + + + + + + +++++++++++++++++++ + + +== What is TRex? + + +* TRex is a *statefull* *traffic* *generator* tool based on a smart replay of real flows (not a full TCP/IP stack,yet) +++++++++++++++++++ +title= +++++++++++++++++++ +* Generates, manipulates and amplifies based on templates of a real/captured flows. +* Templates are processed offline +* *High* *performance* full line rate +** tx= up to 200Gb/sec rx=up to 200Gb/sec +* *Low* *cost* C220M UCS-1RU, Cisco internal eqip +* Generate both sides of the traffic Clients and Servers +* *Standard* *hardware* (X86/Intel NIC I350,82599,XL710) +++++++++++++++++++ +title= +++++++++++++++++++ +* Flexible and Open Software (DPDK) +* Support Virtualization +* Virtual interface support *E1000* , *VMXNET3*. 
Enabler for +** Amazon AWS +** Cisco LaaS + +== Realistic traffic model + +++++++++++++++++++ +title= +++++++++++++++++++ + +== TRex high level software architecture + +++++++++++++++++++ +title= +++++++++++++++++++ + +* *DPDK* +** user space driver +** Big TLB +** Utilize all DDR banks + +* *Multi-Threaded* +** Scale linearly +** ~20Gb/sec per DP core + +* *No TCP-IP* +** Fast events scheduler +** Generate flows template +** Can support 1K templates + +* *Slow-path flexibility (per-flow)* +** Client/server generation +** Measure latency +** Measure flow order +** NAT translation learning + +== TRex basic flows generation algorithm + +++++++++++++++++++ +title= +++++++++++++++++++ + +== DNS simple profile example + +* traffic profile is in YAML fomat + +[source,python] +---- +$more cap2/dns_test.yaml +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + cap_info : + - name: cap2/dns.pcap <1> + cps : 1.0 <2> +---- +<1> the pcap file that include DNS cap file that will be replicate +<2> how many connection per second to generate, 1.0 means 1 connection per secod + +image:images/dns_wireshark.png[title="generator"] + +== DNS simple profile output + +.Formated results +[format="csv",cols="1^,2^,1^,1^,2^,1^,2^,1^", options="header"] +|================= + pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,direction + 1 , 0.010000 , 1 , 1 , 16.0.0.1 , 1024 , 48.0.0.1 , -> + 2 , 0.020000 , 1 , 2 , 16.0.0.1 , 1024 , 48.0.0.1 , <- + 3 , 2.010000 , 2 , 1 , 16.0.0.2 , 1024 , 48.0.0.2 , -> + 4 , 2.020000 , 2 , 2 , 16.0.0.2 , 1024 , 48.0.0.2 , <- + 5 , 3.010000 , 3 , 1 , 16.0.0.3 , 1024 , 48.0.0.3 , -> + 6 , 3.020000 , 3 , 2 , 16.0.0.3 , 1024 , 48.0.0.3 , <- + 7 , 4.010000 , 4 , 1 , 16.0.0.4 , 1024 , 48.0.0.4 , -> + 8 , 4.020000 , 4 , 2 , 16.0.0.4 , 1024 , 48.0.0.4 , <- + 9 , 5.010000 , 5 , 
1 , 16.0.0.5 , 1024 , 48.0.0.5 , -> + 10 , 5.020000 , 5 , 2 , 16.0.0.5 , 1024 , 48.0.0.5 , <- + 11 , 6.010000 , 6 , 1 , 16.0.0.6 , 1024 , 48.0.0.6 , -> +|================= + +== DNS simple profile chart + + ++++++++++++++++++++++++++++++++++ +
+ + + ++++++++++++++++++++++++++++++++++ + +== HTTP & DNS profile example + +[source,python] +---- +- duration : 1.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.10" + servers_start : "48.0.0.1" + servers_end : "48.0.0.3" + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + cap_ipg : true + cap_info : + - name: cap2/dns.pcap + cps : 10.0 <1> + - name: avl/delay_10_http_browsing_0.pcap + cps : 2.0 <1> +---- +<1> Diffrent CPS + +* Inter packet Gap (IPG) is taken from pcap file +* IPG can be manualy set + +== HTTP & DNS profile chart + ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ + +== EMIX traffic profile + +++++++++++++++++++ +title= +++++++++++++++++++ + +* EMIX traffic profile suggested. used for NBAR/AVC tests +* Property +** BW: 70% TCP (http, mail) , 30% UDP (rtp) +** Connections: 50% TCP (http, mail) , 50% UDP (rtp, dns) +** Avg. Packet Size (~585B) +** PPS= 221K per 1Gb/sec +** CPS = 4K per 1Gb/sec +** Flows per 1Gb/sec = 50K +** Avg. Packets per flow (50) + +== EMIX YAML profile + +.Simplified version of EMIX YAML profile +[source,python] +---- +- duration : 0.1 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.1.255" + servers_start : "48.0.0.1" + servers_end : "48.0.20.255" + dual_port_mask : "1.0.0.0" + cap_ipg : true + cap_info : + - name: avl/delay_10_http_get_0.pcap + cps : 404.52 + - name: avl/delay_10_http_post_0.pcap + cps : 404.52 + - name: avl/delay_10_https_0.pcap + cps : 130.8745 + - name: avl/delay_10_http_browsing_0.pcap + cps : 709.89 + - name: avl/delay_10_exchange_0.pcap + cps : 253.81 + - name: avl/delay_10_mail_pop_2.pcap + cps : 4.759 + - name: avl/delay_10_oracle_0.pcap + cps : 79.3178 + - name: avl/delay_10_rtp_160k_full.pcap + cps : 2.776 + - name: avl/delay_10_smtp_0.pcap + cps : 7.3369 + - name: avl/delay_10_sip_video_call_full.pcap + cps : 29.347 + - name: avl/delay_10_citrix_0.pcap + cps : 43.6248 + - name: avl/delay_10_dns_0.pcap + cps : 1975.015 +---- + +== EMIX profile chart + ++++++++++++++++++++++++++++++++++ +
+ + ++++++++++++++++++++++++++++++++++ + +== TRex command line main options + +[source,bash] +---- +$.sudo /t-rex-64 -f [traffic_yaml] -m [muti] -d [duration] -l [Hz=1000] -c [cores] +---- + +*-f=TRAFFIC_CFG* :: + Traffic YAML configuration file + +*-m*:: + CPS multiplier + +*-d=DURATION* :: + Duration of the test in sec + +*-l=Hz* :: + Latency pakets rate (Hz) + +*-c*:: + How many cores to use + + +== TRex Vision + +* All in One, most of the functionality of existing tools +* Standard tests (e.g. traffic mix, automation) across features/platforms +* Cisco wide developers community +* Open source, Standardize tests + +== Trex GUI + +image:images/TrexViewer.png[width=500] + +* Monitor GUI works on Windows 7 +* Nonitor real-time properties of TRex ( e.g min/max/average latency,jitter ) + +== Python API and automation tools + +++++++++++++++++++ +title= + +
+ +++++++++++++++++++ + +[source,python] +----- +import trex_client + + trex = trex_client.CTRexClient('csi-kiwi-02') + ret = trex.start_trex( + m = 1.1, + d = 20, + f = 'avl/sfr_delay_10_1g.yaml', + l = 1000) + + while trex.is_running() : + trex.get_running_info() + res=trex.get_result_obj() + if res.is_done_warmup (): + res.get_value_list("trex-global.data.m_tx_bps"); + time.sleep(1); + + ret = trex.stop_trex() +----- + +++++++++++++++++++ +
+ + +++++++++++++++++++ + + +== Trex On your laptop + +++++++++++++++++++ +title= +++++++++++++++++++ + +* You can experience TRex on your laptop + +== Roadmap + +* TCP stack +* Export SDK application plugins API - more application support +* Automation (e.g ATS, Cisco HLTAPI etc) +* Open source +* Tunnel agnostic (e.g. VXLAN,QinQ,MPLS,NSH) + +== Benefit + +** Significant budget saving +** Product quality + +== Resource + +.Resource +[options="header",cols="1^",width="40%"] +|================= +| Link +| link:trex_manual.html[Manual-html] +| link:trex_book.pdf[Manual-pdf] +| link:release_notes.html[Release Notes] +| link:trex_manual.html#_download_and_installation[How to install] +|================= + + +++++++++++++++++++ + +
+

+Backup + +
+++++++++++++++++++ + + + + diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc new file mode 100755 index 00000000..3a1e62d6 --- /dev/null +++ b/trex_vm_manual.asciidoc @@ -0,0 +1,324 @@ +T-Rex Virtual Machine setup and basic usage +=========================================== +:author: Dan Klein +:email: +:revnumber: 1.0 +:quotes.++: +:numbered: + + +== Introduction + +=== T-Rex traffic generator + +T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. +This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. + +One of the innovative tools through which T-Rex can be learned and tested during development is a virtual machine instance, fully simulating T-Rex without the need of any additional hardware. + + +==== T-Rex Virtual Machine + +The T-Rex Virtual Machine is based on Oracle's Virtual Box freeware. + +It is designed to enable T-Rex newbies to explore this tool without any special resources, and no single network cable attached. Moreover, is requires no internet or outer network connectivity. + + +== Setup and Usage + +=== Setup + +In order to use T-Rex VM, there are several super-easy steps to follow: + + . Download and install Oracle VM Virtual Box Manage https://www.virtualbox.org/wiki/Downloads[(VB download link)]. + + During the installation you'll be asked to allow the installation of system devices component interactions. Allow it. + . Download the latest T-Rex VM by http://csi-wiki-01:8181/trex/T_Rex_162_VM_Fedora_21.ova[clicking on this link]. + . Open Oracle VM Virtual Box application installed at step 1. + . Under 'File' tab, select 'Import Appliance' (`ctrl+I` shorthand can be used as well). 
The following screen will apear: + +ifdef::backend-docbook[] +image::images/vm_import.png[title="VM import screen",align="center",width=400, link="images/vm_import.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/vm_import.png[title="VM import screen",align="center",width=900, link="images/vm_import.png"] +endif::backend-xhtml11[] + + + + . Browse and select the .ova file you have downloaded at step 2, and click 'continue'. + . Click 'Next, and then make sure that the 'Reinitialize the MAC address of all network cards' checkbox is **not selected**. + . Click 'import' and wait for the import process to finish. + . **That's it! you're all good and set to go!** + + +=== Launching and logging into the machine + +Let's get started with running T-Rex! + +First, launch the virtual machine by selecting it in the VM's menu and hitting 'Start' button. + +ifdef::backend-docbook[] +image::images/vm_selection_screen.png[title="T-Rex VM launching screen",align="center",width=400, link="images/vm_selection_screen.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/vm_selection_screen.png[title="T-Rex VM launching screen",align="center",width=900, link="images/vm_selection_screen.png"] +endif::backend-xhtml11[] + + + + +[IMPORTANT] +==== +You may encounter with "VT-x is disabled" error, similar to the image below. + +In that case, please refer to https://www.virtualbox.org/ticket/4130[this link] and follow the provided steps to overcome this issue. 
+==== + +ifdef::backend-docbook[] +image::images/trex_vm_bios_err.png[title="VT-x disabled possible error message",align="center",width=400, link="images/trex_vm_bios_err.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/trex_vm_bios_err.png[title="VT-x disabled possible error message",align="center",width=900, link="images/trex_vm_bios_err.png"] +endif::backend-xhtml11[] + + +<<<<< + +Next, once the machine is finished booting, login to the machine using the following credentials: + + - Username: `trex` + + - Password: `trex` + +ifdef::backend-docbook[] +image::images/trex_vm_login.png[title="T-Rex VM login",align="center",width=400, link="images/trex_vm_login.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/trex_vm_login.png[title="T-Rex VM login",align="center",width=900, link="images/trex_vm_login.png"] +endif::backend-xhtml11[] + + +TIP: a remote connection to the machine from anywhere in the hosting machine can be achieved by the following command: + + `ssh -p 3022 trex@127.0.0.1` + +=== Running T-Rex traffic generator + +Finally, we're ready to do some T-Rex magic. This is super easy and can be achived be the following steps: + + 1. Change dir to latest version supplied using: `cd /home/trex/v1.62/`. + + 2. Run your desired T-Rex command. + +IMPORTANT: When launching a T-Rex command pay attention make sure that: + + 1. use `sudo` prefix at the beggining of the command. + + 2. Specify the `-f` argument first among all arguments. + + +For example, let's run T-Rex with DNS traffic generation and no latency. The Runnning command is: + +---- +[trex@localhost v1.62]$ sudo ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc +Starting T-Rex 1.62 please wait ... +found configuration file at /etc/trex_cfg.yaml +zmq publisher at: tcp://*:4500 + +... +<1> +... 
+ +-Per port stats table + ports | 0 | 1 + ----------------------------------------------------------------------------------------- + opackets | 17 | 17 + obytes | 1241 | 1513 + ipackets | 17 | 17 + ibytes | 1513 | 1241 + ierrors | 0 | 0 + oerrors | 0 | 0 + Tx Bw | 582.35 bps | 709.99 bps + +-Global stats enabled + Cpu Utilization : 0.8 % 0.0 Gb/core + Platform_factor : 1.0 + Total-Tx : 1.29 Kbps + Total-Rx : 1.29 Kbps + Total-PPS : 1.99 pps + Total-CPS : 1.00 cps + + Expected-PPS : 2.00 pps + Expected-CPS : 1.00 cps + Expected-BPS : 1.30 Kbps + + Active-flows : 0 Clients : 511 Socket-util : 0.0001 % + Open-flows : 17 Servers : 255 Socket : 17 Socket/Clients : 0.0 + drop-rate : 0.00 bps + current time : 18.7 sec + test duration : 81.3 sec +---- +<1> Output prompt continues (trimmed for reader's comfort). + +Now, lets review the generated packets as they observed footnoteref:[<1>,The given output represents the output begining.] by our promiscuous interface (interface #2): + +---- +[trex@localhost ~]$ sudo tcpdump -i enp0s8 +tcpdump: verbose output suppressed, use -v or -vv for full protocol decode +listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes +09:38:53.953651 IP 16.0.0.2.1024 > 48.0.0.2.domain: 48 A? www.cisco.com. (31) +09:38:53.963969 IP 48.0.0.2.domain > 16.0.0.2.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:38:54.960361 IP 16.0.0.3.1024 > 48.0.0.3.domain: 48 A? www.cisco.com. (31) +09:38:54.970358 IP 48.0.0.3.domain > 16.0.0.3.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:38:55.967200 IP 16.0.0.4.1024 > 48.0.0.4.domain: 48 A? www.cisco.com. (31) +09:38:55.977222 IP 48.0.0.4.domain > 16.0.0.4.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:38:56.975355 IP 16.0.0.5.1024 > 48.0.0.5.domain: 48 A? www.cisco.com. (31) +09:38:56.985379 IP 48.0.0.5.domain > 16.0.0.5.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:38:57.981659 IP 16.0.0.6.1024 > 48.0.0.6.domain: 48 A? www.cisco.com. 
(31) +09:38:57.992358 IP 48.0.0.6.domain > 16.0.0.6.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:38:58.990979 IP 16.0.0.7.1024 > 48.0.0.7.domain: 48 A? www.cisco.com. (31) +09:38:59.000952 IP 48.0.0.7.domain > 16.0.0.7.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:39:00.009403 IP 16.0.0.8.1024 > 48.0.0.8.domain: 48 A? www.cisco.com. (31) +09:39:00.019456 IP 48.0.0.8.domain > 16.0.0.8.1024: 48* 1/0/0 A 100.100.100.100 (47) +09:39:01.015810 IP 16.0.0.9.1024 > 48.0.0.9.domain: 48 A? www.cisco.com. (31) +---- + + +Let's have a look at another example. + +We want to run T-Rex with simple http traffic generation. The running command will look like this: + +---- +[trex@localhost v1.62]$ sudo ./t-rex-64 -f cap2/http_simple.yaml -d 100 -l 1000 -m 1 --nc +Starting T-Rex 1.62 please wait ... +found configuration file at /etc/trex_cfg.yaml +zmq publisher at: tcp://*:4500 + +... +<1> +... + +-Per port stats table + ports | 0 | 1 + ----------------------------------------------------------------------------------------- + opackets | 40983 | 41946 + obytes | 2563951 | 6015664 + ipackets | 41946 | 40983 + ibytes | 6015664 | 2563951 + ierrors | 0 | 0 + oerrors | 0 | 0 + Tx Bw | 520.83 Kbps | 1.27 Mbps + +-Global stats enabled + Cpu Utilization : 3.1 % 0.1 Gb/core + Platform_factor : 1.0 + Total-Tx : 1.79 Mbps + Total-Rx : 1.79 Mbps + Total-PPS : 2.11 Kpps + Total-CPS : 2.84 cps + + Expected-PPS : 102.71 pps + Expected-CPS : 2.78 cps + Expected-BPS : 764.51 Kbps + + Active-flows : 0 Clients : 255 Socket-util : 0.0000 % + Open-flows : 107 Servers : 65535 Socket : 0 Socket/Clients : 0.0 + drop-rate : 0.00 bps + current time : 39.6 sec + test duration : 60.4 sec + +-Latency stats enabled + Cpu Utilization : 1.0 % + if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window + | , , check, , latency(usec),latency (usec) ,(usec) , + ---------------------------------------------------------------------------------------- + 0 | 39490, 39489, 0, 0, 1276 , 106714, 91 | 1737 1880 + 1 | 
39490, 39490, 0, 0, 226 , 107619, 203 | 1694 1041 + +---- + +<1> Output prompt continues (trimmed for reader's comfort). + +Once again, lets review the generated packets as they observed footnoteref:[<1>] by our promiscuous interface (interface #2): + +---- +[trex@localhost ~]$ sudo tcpdump -a -i enp0s8 +tcpdump: verbose output suppressed, use -v or -vv for full protocol decode +listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes +19:31:46.650426 IP 16.0.0.1.1024 > 48.0.0.1.http: Flags [S], seq 404375002, win 32768, options [mss 1460], length 0 +19:31:46.650439 IP 16.0.0.1.17 > 48.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:46.650442 IP 48.0.0.1.17 > 16.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:46.650445 IP 16.0.0.1.17 > 48.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:46.652458 IP 48.0.0.1.17 > 16.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:46.652462 IP 16.0.0.1.17 > 48.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:46.652465 IP 48.0.0.1.17 > 16.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:47.152768 IP 16.0.0.1.17 > 48.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:47.152788 IP 48.0.0.1.17 > 16.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:47.153796 IP 16.0.0.1.17 > 48.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:47.153801 IP 48.0.0.1.17 > 16.0.0.1.80: sctp (1) [Bad chunk length 0] +19:31:47.154803 IP 48.0.0.2.http > 16.0.0.2.1024: Flags [P.], seq 404419110:404420570, ack 404375252, win 32768, length 1460 +19:31:47.154823 IP 48.0.0.2.http > 16.0.0.2.1024: Flags [P.], seq 1460:2920, ack 1, win 32768, length 1460 +---- + +[NOTE] +See http://csi-wiki-01:8181/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. + + +=== T-Rex Live monitoring + +Once we have T-Rex up and running, we can enjoy the benefit of having live monitoring on its performance, using TRexViewer application footnote:[Supported only on Windows OS] + + + +This can be easily done by following these steps: + 0. 
Download the latest version of TrexViewer application and install it using http://csi-wiki-01:8080/display/bpsim/TrexViewer[this link]. + + 1. Start the application and fill in the following: + + - Trex ip: `127.0.0.1:4500` + + 2. Click the play button. + +ifdef::backend-docbook[] +image::images/trex_motinor_config.png[title="T-Rex viewer start screen",align="center",width=400,link="images/trex_motinor_config.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/trex_motinor_config.png[title="T-Rex viewer start screen",align="center",width=900,link="images/trex_motinor_config.png"] +endif::backend-xhtml11[] + + + + 3. **That's it!** + + Now the live data from T-Rex will be displayed on the screen. + +ifdef::backend-docbook[] +image::images/trex_motinor_view.png[title="T-Rex viewer monitor screen",align="center",width=400,link="images/trex_motinor_view.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/trex_motinor_view.png[title="T-Rex viewer monitor screen",align="center",width=900,link="images/trex_motinor_view.png"] +endif::backend-xhtml11[] + + +[NOTE] +Make sure T-Rex is running, otherwise data will not be available at TRexViewer. + +=== Architecture and network design + +Since no hardware is used, T-Rex simulates traffic using a virtual internal network, named 'trex_intnet'. + +The following figure describes the virtual "wiring" of the virtual machine to support T-Rex traffic simulation. + +ifdef::backend-docbook[] +image::images/T-Rex_vm.png[title="T-Rex virtual connectivity",align="center",width=400, link="images/T-Rex_vm.png"] +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +image::images/T-Rex_vm.png[title="T-Rex virtual connectivity",align="center",width=900, link="images/T-Rex_vm.png"] +endif::backend-xhtml11[] + + +The VM runs T-Rex with single client and single server port. The traffic generated by each of those ports are switched over the 'trex_intnet' virtual network and received by the other side. 
+ +T-Rex identifies only the packets which were dedicately sent by one of those traffic ports and receives them in the other port. Hence, packets generated by client port will be received by the server port and vice versa. + +Ontop, network adapter #4 used to [underline]#listen# to all traffic generated by both of T-Rex's ports, therefore it is very useful in providing live data of the generated flows over the network. \ No newline at end of file diff --git a/waf b/waf new file mode 100755 index 00000000..4e68fedc Binary files /dev/null and b/waf differ diff --git a/waf.css b/waf.css new file mode 100755 index 00000000..e9fa3f50 --- /dev/null +++ b/waf.css @@ -0,0 +1,39 @@ +div.tableblock > table { + border: 1px solid gray; +} + +div#header-pic { + background-image: url("images/bg4.jpg"); + background-repeat: no-repeat; + background-color: #cccccc; +} + +a:visited { + color: #8A2908; +} + + +div#header h1 { + background: url('images/trex_logo_64_64.png') no-repeat left center; + padding-left: 80px; + line-height: 80px; + height: 80px; +} + +div.title, caption.title { + text-align: center; + margin-bottom: 0.2em; +} + +div.tableblock > table th { + background-color: #F4F4F4; +} + +h1, h2, h3, h4, h5, h6, span#author, div.title, caption.title, div.admonitionblock .icon, div#toctitle, div.sidebar-title, div.image-title { + color: #333; +} + +body, div.sectionbody, div#toctitle { + font-family: 'Lucida Grande', Verdana, Arial, sans-serif; +} + diff --git a/wscript b/wscript new file mode 100755 index 00000000..b4fe280c --- /dev/null +++ b/wscript @@ -0,0 +1,180 @@ +#! /usr/bin/env python +# encoding: utf-8 +# hhaim, 2014 (IL) base on WAF book + +""" +call 'waf --targets=waf.pdf' or use 'waf list' to see the targets available +""" + +VERSION='0.0.1' +APPNAME='wafdocs' + +import os, re, shutil + + +top = '.' 
+out = 'build' + +re_xi = re.compile('''^(include|image)::([^.]*.(asciidoc|\\{PIC\\}))\[''', re.M) +def ascii_doc_scan(self): + p = self.inputs[0].parent + node_lst = [self.inputs[0]] + seen = [] + depnodes = [] + while node_lst: + nd = node_lst.pop(0) + if nd in seen: continue + seen.append(nd) + + code = nd.read() + for m in re_xi.finditer(code): + name = m.group(2) + if m.group(3) == '{PIC}': + + ext = '.eps' + if self.generator.rule.rfind('A2X') > 0: + ext = '.png' + + k = p.find_resource(name.replace('{PIC}', ext)) + if k: + depnodes.append(k) + else: + k = p.find_resource(name) + if k: + depnodes.append(k) + node_lst.append(k) + return [depnodes, ()] + + + +import re +def scansize(self): + name = 'image::%s\\{PIC\\}\\[.*,(width|height)=(\\d+)' % self.inputs[0].name[:-4] + re_src = re.compile(name) + lst = self.inputs[0].parent.get_src().ant_glob('*.txt') + for x in lst: + m = re_src.search(x.read()) + if m: + val = str(int(1.6 * int(m.group(2)))) + if m.group(1) == 'width': + w = val + h = "800" + else: + w = "800" + h = val + + ext = self.inputs[0].name[-3:] + if ext == 'eps': + code = '-geometry %sx%s' % (w, h) + elif ext == 'dia': + if m.group(1) == 'width': + h = '' + else: + w = '' + code = '--size %sx%s' % (w, h) + else: + code = '-Gsize="%s,%s"' % (w, h) + break + else: + return ([], '') + + return ([], code) + +def options(opt): + opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled') + +def configure(conf): + conf.find_program('asciidoc', path='/usr/local/bin/', var='ASCIIDOC') + pass; + +def convert_to_pdf(task): + input_file = task.outputs[0].abspath() + out_dir = task.outputs[0].parent.get_bld().abspath() + os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) + return (0) + +def convert_to_pdf_book(task): + input_file = task.outputs[0].abspath() + out_dir = task.outputs[0].parent.get_bld().abspath() + os.system('a2x --no-xmllint -v -f pdf -d book %s 
-D %s ' %(task.inputs[0].abspath(),out_dir ) ) + return (0) + + +def ensure_dir(f): + if not os.path.exists(f): + os.makedirs(f) + + +def my_copy(task): + input_file=task.outputs[0].abspath() + out_dir=task.outputs[0].parent.get_bld().abspath() + ensure_dir(out_dir) + shutil.copy2(input_file, out_dir+ os.sep+task.outputs[0].name) + return (0) + + +def do_visio(bld): + for x in bld.path.ant_glob('visio\\*.vsd'): + tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) + +def build(bld): + bld(rule=my_copy, target='symbols.lang') + + for x in bld.path.ant_glob('images\\**\**.png'): + bld(rule=my_copy, target=x) + bld.add_group() + + for x in bld.path.ant_glob('video\\**\**.mp4'): + bld(rule=my_copy, target=x) + bld.add_group() + + + for x in bld.path.ant_glob('images\\**\**.jpg'): + bld(rule=my_copy, target=x) + bld.add_group() + + bld(rule=my_copy, target='my_chart.js') + + bld.add_group() # separator, the documents may require any of the pictures from above + + bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', + source='trex_config.asciidoc ', target='trex_config_guide.html', scan=ascii_doc_scan) + + + bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', + source='trex_preso.asciidoc ', target='trex_preso.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='release_notes.asciidoc waf.css', target='release_notes.html', scan=ascii_doc_scan) + + + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book, + source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) + + + bld(rule=convert_to_pdf_book, + source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.pdf', 
scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book, + source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.pdf', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book, + source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.pdf', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) + + + + + + -- cgit 1.2.3-korg From 2cf47eb690c542a67cddfb300a479ed2c2adb1c5 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 28 Jun 2015 16:59:54 +0300 Subject: minor update --- trex_vm_manual.asciidoc | 70 ++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index 3a1e62d6..f866ec19 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -11,28 +11,28 @@ T-Rex Virtual Machine setup and basic usage === T-Rex traffic generator -T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. +TRex traffic generator is a tool design the benchmark platforms with realistic traffic. This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. 
-One of the innovative tools through which T-Rex can be learned and tested during development is a virtual machine instance, fully simulating T-Rex without the need of any additional hardware. +One of the innovative tools through which TRex can be learned and tested during development is a virtual machine instance, fully simulating TRex without the need of any additional hardware. -==== T-Rex Virtual Machine +==== TRex Virtual Machine -The T-Rex Virtual Machine is based on Oracle's Virtual Box freeware. +The TRex Virtual Machine is based on Oracle's Virtual Box freeware. -It is designed to enable T-Rex newbies to explore this tool without any special resources, and no single network cable attached. Moreover, is requires no internet or outer network connectivity. +It is designed to enable TRex newbies to explore this tool without any special resources, and no single network cable attached. Moreover, is requires no internet or outer network connectivity. == Setup and Usage === Setup -In order to use T-Rex VM, there are several super-easy steps to follow: +In order to use TRex VM, there are several super-easy steps to follow: . Download and install Oracle VM Virtual Box Manage https://www.virtualbox.org/wiki/Downloads[(VB download link)]. + During the installation you'll be asked to allow the installation of system devices component interactions. Allow it. - . Download the latest T-Rex VM by http://csi-wiki-01:8181/trex/T_Rex_162_VM_Fedora_21.ova[clicking on this link]. + . Download the latest TRex VM by http://trex-tgn.cisco.com/trex/T_Rex_162_VM_Fedora_21.ova[clicking on this link]. . Open Oracle VM Virtual Box application installed at step 1. . Under 'File' tab, select 'Import Appliance' (`ctrl+I` shorthand can be used as well). The following screen will apear: @@ -54,16 +54,16 @@ endif::backend-xhtml11[] === Launching and logging into the machine -Let's get started with running T-Rex! +Let's get started with running TRex! 
First, launch the virtual machine by selecting it in the VM's menu and hitting 'Start' button. ifdef::backend-docbook[] -image::images/vm_selection_screen.png[title="T-Rex VM launching screen",align="center",width=400, link="images/vm_selection_screen.png"] +image::images/vm_selection_screen.png[title="TRex VM launching screen",align="center",width=400, link="images/vm_selection_screen.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/vm_selection_screen.png[title="T-Rex VM launching screen",align="center",width=900, link="images/vm_selection_screen.png"] +image::images/vm_selection_screen.png[title="TRex VM launching screen",align="center",width=900, link="images/vm_selection_screen.png"] endif::backend-xhtml11[] @@ -93,35 +93,35 @@ Next, once the machine is finished booting, login to the machine using the follo - Password: `trex` ifdef::backend-docbook[] -image::images/trex_vm_login.png[title="T-Rex VM login",align="center",width=400, link="images/trex_vm_login.png"] +image::images/trex_vm_login.png[title="TRex VM login",align="center",width=400, link="images/trex_vm_login.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/trex_vm_login.png[title="T-Rex VM login",align="center",width=900, link="images/trex_vm_login.png"] +image::images/trex_vm_login.png[title="TRex VM login",align="center",width=900, link="images/trex_vm_login.png"] endif::backend-xhtml11[] TIP: a remote connection to the machine from anywhere in the hosting machine can be achieved by the following command: + `ssh -p 3022 trex@127.0.0.1` -=== Running T-Rex traffic generator +=== Running TRex traffic generator -Finally, we're ready to do some T-Rex magic. This is super easy and can be achived be the following steps: +Finally, we're ready to do some TRex magic. This is super easy and can be achived be the following steps: 1. Change dir to latest version supplied using: `cd /home/trex/v1.62/`. - 2. Run your desired T-Rex command. + 2. 
Run your desired TRex command. -IMPORTANT: When launching a T-Rex command pay attention make sure that: + +IMPORTANT: When launching a TRex command pay attention make sure that: + 1. use `sudo` prefix at the beggining of the command. + 2. Specify the `-f` argument first among all arguments. -For example, let's run T-Rex with DNS traffic generation and no latency. The Runnning command is: +For example, let's run TRex with DNS traffic generation and no latency. The Runnning command is: ---- [trex@localhost v1.62]$ sudo ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc -Starting T-Rex 1.62 please wait ... +Starting TRex 1.62 please wait ... found configuration file at /etc/trex_cfg.yaml zmq publisher at: tcp://*:4500 @@ -185,11 +185,11 @@ listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes Let's have a look at another example. + -We want to run T-Rex with simple http traffic generation. The running command will look like this: +We want to run TRex with simple http traffic generation. The running command will look like this: ---- [trex@localhost v1.62]$ sudo ./t-rex-64 -f cap2/http_simple.yaml -d 100 -l 1000 -m 1 --nc -Starting T-Rex 1.62 please wait ... +Starting TRex 1.62 please wait ... found configuration file at /etc/trex_cfg.yaml zmq publisher at: tcp://*:4500 @@ -263,9 +263,9 @@ listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes See http://csi-wiki-01:8181/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. 
-=== T-Rex Live monitoring +=== TRex Live monitoring -Once we have T-Rex up and running, we can enjoy the benefit of having live monitoring on its performance, using TRexViewer application footnote:[Supported only on Windows OS] +Once we have TRex up and running, we can enjoy the benefit of having live monitoring on its performance, using TRexViewer application footnote:[Supported only on Windows OS] @@ -278,47 +278,47 @@ This can be easily done by following these steps: 2. Click the play button. ifdef::backend-docbook[] -image::images/trex_motinor_config.png[title="T-Rex viewer start screen",align="center",width=400,link="images/trex_motinor_config.png"] +image::images/trex_motinor_config.png[title="TRex viewer start screen",align="center",width=400,link="images/trex_motinor_config.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/trex_motinor_config.png[title="T-Rex viewer start screen",align="center",width=900,link="images/trex_motinor_config.png"] +image::images/trex_motinor_config.png[title="TRex viewer start screen",align="center",width=900,link="images/trex_motinor_config.png"] endif::backend-xhtml11[] 3. **That's it!** + - Now the live data from T-Rex will be displayed on the screen. + Now the live data from TRex will be displayed on the screen. 
ifdef::backend-docbook[] -image::images/trex_motinor_view.png[title="T-Rex viewer monitor screen",align="center",width=400,link="images/trex_motinor_view.png"] +image::images/trex_motinor_view.png[title="TRex viewer monitor screen",align="center",width=400,link="images/trex_motinor_view.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/trex_motinor_view.png[title="T-Rex viewer monitor screen",align="center",width=900,link="images/trex_motinor_view.png"] +image::images/trex_motinor_view.png[title="TRex viewer monitor screen",align="center",width=900,link="images/trex_motinor_view.png"] endif::backend-xhtml11[] [NOTE] -Make sure T-Rex is running, otherwise data will not be available at TRexViewer. +Make sure TRex is running, otherwise data will not be available at TRexViewer. === Architecture and network design -Since no hardware is used, T-Rex simulates traffic using a virtual internal network, named 'trex_intnet'. +Since no hardware is used, TRex simulates traffic using a virtual internal network, named 'trex_intnet'. -The following figure describes the virtual "wiring" of the virtual machine to support T-Rex traffic simulation. +The following figure describes the virtual "wiring" of the virtual machine to support TRex traffic simulation. ifdef::backend-docbook[] -image::images/T-Rex_vm.png[title="T-Rex virtual connectivity",align="center",width=400, link="images/T-Rex_vm.png"] +image::images/T-Rex_vm.png[title="TRex virtual connectivity",align="center",width=400, link="images/T-Rex_vm.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/T-Rex_vm.png[title="T-Rex virtual connectivity",align="center",width=900, link="images/T-Rex_vm.png"] +image::images/T-Rex_vm.png[title="TRex virtual connectivity",align="center",width=900, link="images/T-Rex_vm.png"] endif::backend-xhtml11[] -The VM runs T-Rex with single client and single server port. 
The traffic generated by each of those ports are switched over the 'trex_intnet' virtual network and received by the other side. +The VM runs TRex with single client and single server port. The traffic generated by each of those ports are switched over the 'trex_intnet' virtual network and received by the other side. -T-Rex identifies only the packets which were dedicately sent by one of those traffic ports and receives them in the other port. Hence, packets generated by client port will be received by the server port and vice versa. +TRex identifies only the packets which were dedicately sent by one of those traffic ports and receives them in the other port. Hence, packets generated by client port will be received by the server port and vice versa. -Ontop, network adapter #4 used to [underline]#listen# to all traffic generated by both of T-Rex's ports, therefore it is very useful in providing live data of the generated flows over the network. \ No newline at end of file +Ontop, network adapter #4 used to [underline]#listen# to all traffic generated by both of TRex's ports, therefore it is very useful in providing live data of the generated flows over the network. -- cgit 1.2.3-korg From 8b835f9a480c8dd37fef60762f0f9987a5d05b86 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 28 Jun 2015 17:02:00 +0300 Subject: minor update --- trex_vm_manual.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index f866ec19..6c1d7208 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -9,7 +9,7 @@ T-Rex Virtual Machine setup and basic usage == Introduction -=== T-Rex traffic generator +=== TRex traffic generator TRex traffic generator is a tool design the benchmark platforms with realistic traffic. This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. 
-- cgit 1.2.3-korg From 7f3cb2e67569d39879a37868cf6a4fcc6f600f1c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 28 Jun 2015 17:17:43 +0300 Subject: minor update --- trex_vm_manual.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index 6c1d7208..ce7e570c 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -260,7 +260,7 @@ listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes ---- [NOTE] -See http://csi-wiki-01:8181/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. +See http://trex-tgn.cisco.com/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. === TRex Live monitoring @@ -270,7 +270,7 @@ Once we have TRex up and running, we can enjoy the benefit of having live monito This can be easily done by following these steps: - 0. Download the latest version of TrexViewer application and install it using http://csi-wiki-01:8080/display/bpsim/TrexViewer[this link]. + 0. Download the latest version of TrexViewer application and install it using http://trex-tgn.cisco.com/trex/client_gui/setup.exe[this link]. 1. 
Start the application and fill in the following: + - Trex ip: `127.0.0.1:4500` -- cgit 1.2.3-korg From a85c8d3d5247319c8e50ee62e19993d5ad9b5e31 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 5 Aug 2015 17:37:46 +0300 Subject: add release --- release_notes.asciidoc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 4300ba06..1bc0ba88 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,16 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.75 == + +=== fix issues: === + +* First version that works from GitHub/Git - init script are in the output package + +== Release 1.75 == + +* This version does not work, no init script + == Release 1.72 == -- cgit 1.2.3-korg From be48c668a6c24c5c3d7b8a749719b508e855ced3 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Mon, 10 Aug 2015 18:05:45 +0300 Subject: added control plane build method --- wscript | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/wscript b/wscript index b4fe280c..127ec2e1 100755 --- a/wscript +++ b/wscript @@ -10,6 +10,8 @@ VERSION='0.0.1' APPNAME='wafdocs' import os, re, shutil +import shlex +import subprocess top = '.' 
@@ -117,6 +119,19 @@ def do_visio(bld): for x in bld.path.ant_glob('visio\\*.vsd'): tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) +def build_cp_docs (trex_src_dir, dest_dir = "_build", builder = "html"): + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -b {bld} {src} {dst}".format( + bld= builder, + src= ".", + dst= dest_dir) + ) + bld_path = os.path.abspath( os.path.join(trex_src_dir, 'automation', 'trex_control_plane', 'doc') ) + ret_val = subprocess.call(build_doc_cmd, cwd = bld_path) + if ret_val: + raise RuntimeError("Build operation of control plain docs failed with return value {ret}".format(ret= ret_val)) + return + + def build(bld): bld(rule=my_copy, target='symbols.lang') -- cgit 1.2.3-korg From d246cd00853802c08d0bdc52eebdf92db73be263 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Mon, 10 Aug 2015 18:33:46 +0300 Subject: updated wscript --- wscript | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/wscript b/wscript index 127ec2e1..68b617d8 100755 --- a/wscript +++ b/wscript @@ -188,6 +188,15 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) + # generate control plane documentation + export_path = os.path.join(os.getcwd(), 'build') + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists the desired ENV variable. 
+ build_cp_docs(trex_core_git_path, dest_dir= export_path) + else: + raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") + + -- cgit 1.2.3-korg From da40902ba17b68ee6d4337411b4387a414856a7f Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Mon, 10 Aug 2015 18:45:39 +0300 Subject: fixed indentation, changed output to designated cp_docs folder --- wscript | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/wscript b/wscript index 68b617d8..51f2d103 100755 --- a/wscript +++ b/wscript @@ -188,13 +188,13 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) - # generate control plane documentation - export_path = os.path.join(os.getcwd(), 'build') - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists the desired ENV variable. - build_cp_docs(trex_core_git_path, dest_dir= export_path) - else: - raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") + # generate control plane documentation + export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists the desired ENV variable. 
+ build_cp_docs(trex_core_git_path, dest_dir= export_path) + else: + raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") -- cgit 1.2.3-korg From 72432608f20e30ff6ba80022447824e0e9648e4f Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Mon, 10 Aug 2015 18:51:20 +0300 Subject: fixed reference bug --- wscript | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wscript b/wscript index 51f2d103..9e523152 100755 --- a/wscript +++ b/wscript @@ -125,7 +125,7 @@ def build_cp_docs (trex_src_dir, dest_dir = "_build", builder = "html"): src= ".", dst= dest_dir) ) - bld_path = os.path.abspath( os.path.join(trex_src_dir, 'automation', 'trex_control_plane', 'doc') ) + bld_path = os.path.abspath( os.path.join(trex_src_dir, 'scripts', 'automation', 'trex_control_plane', 'doc') ) ret_val = subprocess.call(build_doc_cmd, cwd = bld_path) if ret_val: raise RuntimeError("Build operation of control plain docs failed with return value {ret}".format(ret= ret_val)) -- cgit 1.2.3-korg From e34d5f239ed6146ebd16fa52023f6570b112e33f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 11 Aug 2015 02:03:03 +0300 Subject: add filter --- .gitignore | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..fb87dd11 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +# Compiled source # +################### +.lock-waf* +.waf* +linux* +build/ +*.pyc + + +# Packages # +############ +# it's better to unpack these files and commit the raw source +# git has its own built in compression methods +*.7z +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.zip + + -- cgit 1.2.3-korg From b997375f6ce668b3601fa1a01284ff7fd3f8e303 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 11 Aug 2015 03:15:42 +0300 Subject: add publish/release new function --- trex_book.asciidoc | 4 +-- wscript | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 2 
deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 5e381356..3d5bd449 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -6,7 +6,7 @@ TRex :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex - +:local_web_server_url: csi-wiki-01:8181/trex @@ -182,7 +182,7 @@ $[root@trex]lspci | grep Network Connect by ssh to the TRex machine and do the following: -assuming *$WEB_URL* is *{web_server_url}* +assuming *$WEB_URL* is *{web_server_url}* or *{local_web_server_url}* (cisco internal) [source,bash] ---- diff --git a/wscript b/wscript index 9e523152..9517813f 100755 --- a/wscript +++ b/wscript @@ -197,6 +197,79 @@ def build(bld): raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") +class Env(object): + @staticmethod + def get_env(name) : + s= os.environ.get(name); + if s == None: + print "You should define $",name + raise Exception("Env error"); + return (s); + + @staticmethod + def get_release_path () : + s= Env().get_env('TREX_LOCAL_PUBLISH_PATH'); + s +=get_build_num ()+"/" + return s; + + @staticmethod + def get_remote_release_path () : + s= Env().get_env('TREX_REMOTE_PUBLISH_PATH'); + return s; + + @staticmethod + def get_local_web_server () : + s= Env().get_env('TREX_WEB_SERVER'); + return s; + + # extral web + @staticmethod + def get_trex_ex_web_key() : + s= Env().get_env('TREX_EX_WEB_KEY'); + return s; + + @staticmethod + def get_trex_ex_web_path() : + s= Env().get_env('TREX_EX_WEB_PATH'); + return s; + + @staticmethod + def get_trex_ex_web_user() : + s= Env().get_env('TREX_EX_WEB_USER'); + return s; + + @staticmethod + def get_trex_ex_web_srv() : + s= Env().get_env('TREX_EX_WEB_SRV'); + return s; + + @staticmethod + def get_trex_core() : + s= Env().get_env('TREX_CORE_GIT'); + return s; + + + +def release(bld): + # copy all the files to our web server + core_dir = Env().get_trex_core() + release_dir = core_dir +"/scripts/doc/"; + os.system('mkdir -p '+release_dir) + os.system('cp -rv 
build/release_notes.* '+ release_dir) + + +def publish(bld): + # copy all the files to our web server + remote_dir = "%s:%s" % ( Env().get_local_web_server(), Env().get_remote_release_path ()+'../doc/') + os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) + + +def publish_ext(bld): + from_ = 'build/' + os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) + + + -- cgit 1.2.3-korg From 80e6e38bea8e0249a86eb0d97d2ebc8f10d4795c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 18 Aug 2015 05:27:28 +0300 Subject: minor fix --- trex_preso.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/trex_preso.asciidoc b/trex_preso.asciidoc index 50f4ffc7..1e66f69c 100755 --- a/trex_preso.asciidoc +++ b/trex_preso.asciidoc @@ -14,9 +14,9 @@ TRex realistic traffic generator == What problem is being solved? -* Network elements include complex statful features -* Require testing with statful and real traffic mix -* Traffic generators of statful/realistic traffic are: +* Network elements include complex stateful features +* Require testing with stateful and real traffic mix +* Traffic generators of stateful/realistic traffic are: ** Expensive ~$100-500K ** Not scalable for high rates ** Not flexible -- cgit 1.2.3-korg From 41b6fc7e9beb319bf15aa70be3565092831acf3c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 23 Aug 2015 08:48:31 +0300 Subject: git vm commands --- vm_doc.asciidoc | 220 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ wscript | 3 + 2 files changed, 223 insertions(+) create mode 100644 vm_doc.asciidoc diff --git a/vm_doc.asciidoc b/vm_doc.asciidoc new file mode 100644 index 00000000..a61afc3c --- /dev/null +++ b/vm_doc.asciidoc @@ -0,0 +1,220 @@ + +== VM instructions + +=== instructions_type + +these are the instructions type + + +==== fix_checksum_ipv4 + +This 
command will fix ipv4 checksum header + +[source,python] +---- +{ +ins_name : string ,"fix_checksum_ipv4" ## this command will recalculate the ipv4 checksum +pkt_offset : uint16_t, number, ## the offset into the packet when the ipv4 header is located +} +---- + + +==== flow_man_simple + +This command will allocate and manipulate flow object data +For example, allocate a flow variable and object from 10.0.0.1-10.0.0.10 + +[source,python] +---- +{ +ins_name: string, "flow_man_simple" ## increment a flow variable +flow_varible_name: string "name_of_varible" # internal software will allocate the object for this, the name should be unique +object_size : uint16_t #size of the variable 1,2,4,8 ( max uint64) +Operation : "inc","dec","random" # the command could be inc from min-max start at init + # decrement + # random +split_by_core : true/false ##do we want to split the range by cores +init_value : number, size of object_size (max uint64) +min_value : number, size of object_size (max uint64) +max_value : number, size of object_size (max uint64) +} +---- + +==== write_to_pkt + +This command will copy flow varible into packet offset + +[source,c] +---- +tmp_pkt_data = (flow_var +add_value) + +if (big_edian){ + (varible_size )pkt[pkt_offset] =swap(tmp_pkt_data); +} +---- + + +[source,python] +---- +{ +ins_name : string , "write_to_pkt" ## +flow_varible_name : string "name_of_varible" # flow varible value to copy from +pkt_offset : uint16_t # the offset into the packet to copy the varible +add_value : 0 (size_of_the_varible)# when writing add this value +big_edian : bool default true # swap varible when copy yo packet +} +---- + + + +=== Examples + +=== Examples1 + +an examples to a programs that change src_ip in specific range for one core +range of src_ip 10.0.0.1-10.0.0.10 start from 10.0.0.7 +update ipv4 checksum +ip offset is in 14 + + offset +[ 6 - dest mac 0 + 6 - src mac 6 + 2 network 12 + + ip[0] 14 + ip[4] 18 + ip[8]-TTL,Protocol 22 + ip[12]-src_ip 26 + 
ip[12]-dest_ip 30 + +} + +The program + +[source,python] +---- +[ + +{ +ins_name : "flow_data_inc" +flow_varible_name : "src_ip" +object_size : 1 +operaqtion : "inc" +split_by_core : false # one core +init_value : 7 +min_value : 1 +max_value : 10 +} , + +{ +ins_name : "write_to_pkt" +flow_varible_name : "src_ip" +pkt_offset : 26, +add_value : 0 , +big_edian : true +}, + +{ +ins_name : "fix_checksum_ipv4" +pkt_offset : 14 +} + +] +---- + +=== Examples2 + +an examples to a programs that change src_ip and dest_ip in specific range for one core +range of src_ip 10.0.0.1-10.0.0.10 start from 10.0.0.7 +range of dest_ip 48.0.0.1-48.0.0.10 start from 48.0.0.7 + +update ipv4 checksum +ip offset is in 14 + + offset +[ 6 - dest mac 0 + 6 - src mac 6 + 2 network 12 + + ip[0] 14 + ip[4] 18 + ip[8]-TTL,Protocol 22 + ip[12]-src_ip 26 + ip[12]-dest_ip 30 + +} + +The program + +[source,python] +---- + +[ + +{ +ins_name : "flow_data_inc" +flow_varible_name : "src_ip" +object_size : 1 +operaqtion : "inc" +split_by_core : false # one core +init_value : 7 +min_value : 1 +max_value : 10 +} , + +{ +ins_name : "write_to_pkt" +flow_varible_name : "src_ip" +pkt_offset : 26, +add_value : 0 , +big_edian : true +}, + +{ +ins_name : "write_to_pkt" +flow_varible_name : "src_ip" +pkt_offset : 30, +add_value : 0 , +big_edian : true +}, + + +{ +ins_name : "fix_checksum_ipv4" +pkt_offset : 14 +} +] + +---- + + +=== Considerations + + +==== Control-Plain check + +- Verify that packet offset into fix_checksum_ipv4 is less that pkt_size - min_ip_header +- There is no stream that are orphaned (not started at startup and nobody call them) + +==== Data-Plain check + +- Convert the commands to a VM compress command +- Allocate flow memory per flow for each stream (currently add the memory in each offset) +- VM runner at startup/ each packet + + + + + + + + + + + + + + + + + diff --git a/wscript b/wscript index 9517813f..62ed702b 100755 --- a/wscript +++ b/wscript @@ -182,6 +182,9 @@ def build(bld): 
bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 0e90f9e313d52a4bde41c6576ffef1741eac10e3 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Tue, 25 Aug 2015 09:37:54 +0300 Subject: updated .gitignore file --- .gitignore | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index fb87dd11..a4bdc226 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,19 @@ # Compiled source # ################### -.lock-waf* -.waf* +*.com +*.class +*.dll +*.exe +*.o +*.so +.lock* +linux_dpdk* linux* -build/ +scripts/_t-rex-* +scripts/bp-sim-* +scripts/doc/* +scripts/mock-* + *.pyc @@ -19,5 +29,28 @@ build/ *.rar *.tar *.zip + +# Logs and databases # +###################### +*.log +*.sql +*.sqlite +# OS generated files # +###################### +.DS_Store +.DS_Store? 
+._* +.Trashes +ehthumbs.db +Thumbs.db + +# IDE/ Editors files # +###################### +.idea/ +*.vpj +*.vpw +*.vtg +*.vpwhist + -- cgit 1.2.3-korg From 8f9298ac71ee09f3ebb36ef5ac2724e8f986e0da Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 2 Sep 2015 05:45:42 +0300 Subject: add packet builder --- packet_builder_yaml.asciidoc | 121 +++++++++++++++++++++++++++++++++++++++++++ wscript | 3 ++ 2 files changed, 124 insertions(+) create mode 100644 packet_builder_yaml.asciidoc diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc new file mode 100644 index 00000000..1252e73e --- /dev/null +++ b/packet_builder_yaml.asciidoc @@ -0,0 +1,121 @@ + +== A file format for GUI packet builder + +=== Introduction + +We would like a file that will be read by GUI and will give us the ability to build packets using GUI + +The format should be *YAML* + + +=== High Level Requirement + +* Define a YAML object format for dynamic building of packets and a program that change various fields +* Ability to *parse* back the same buffer that was created using this tool (reversibility) +** Ability to load packet from a pcap file and parse it +* Ability to save the packet to a pcap file +* Ability to save the packet and program in JSON format (same JSON-RPC format) +* Set a value for any field of any protocol +* Vary packet fields across packets at run time e.g. changing IP/MAC addresses +* Stack protocols in any arbitrary order define in YAML format + +=== Header that should be supported (first phase) + +==== L2 + +* Ethernet +* 802.3 +* LLC SNAP +*VLAN (with QinQ) stack +*MPLS stack + +==== L3 + +* ARP +* IPv4 +* IPv6 (4x header) +* IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) + +==== L4 + +* TCP +* UDP +* ICMPv4 +* ICMPv6 +* IGMP + +==== L7 + +* Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) 
+** random string +** repeat string + +* Pattern Binary +** repeat of value (e.g 0x55) +** random +** seq (1,2,3,3,4) +** User Hex Dump editor + + +=== YAML Format + + +==== Header section + +It describes objects like TCP/UDP/VLAN/MPLS/IPV6/IPv6 Headers/ICMPv4/ICMPv6/IGMP + +Tree line object description + +[source,python] +---- + - name : object name ( object ) + - Fields + -- bits or bytes + -- Array size of types (default 1) + -- Type (uint8, uint16_t, uint32_t, uint16_t, string, string_bl) + -- edit_type [ipv4, ipv6, buffer,string , string_nl] + -- edian - can be default as big, + -- Default value (buffer 0x00,0x12,0x13), format- + -- Exception: ipv4_header_size (bitsx44, ipv4_checksum, tcp_udp_checsum, ipv4_total_length) + -- header_size_multi (this field represents the + -- childes_field_list : could be null in default TCP flags is an example of this + -- *Choice* on by one fields [ Sub Tree] example {01 : Object1 , 02: object2 , 03:Object5} + -- Fields that can't change without repeat from the start in the GUI for example ip_ength, protocol , + -- Next protocol ( dict value : name ) + + + PFP -fields + -- ipv4/ipv6 range min-max + random min-max + -- every uint8,uint16_t, uint32_t could be create a range +---- + + +==== Relations between object headers + +* Represents the order of the headers in the build process. 
+* It is a tree like relation and could have a loop in the tree (ip inside ip) + +[source,python] +---- + +root -> L2 ( Ethernet , 802.3 , LLC SNAP ) + |( by field ) + | + ------------------------------------- ( VLAN (with QinQ), MPLS , ipv4, ipv6, ARP , ICMP ) + | | | | + | ipv4/ipv6 - - + | | + | | + [Possibility - Ethernet/802.3/LLC SNAP) | UDP/TCP/Pyload + Object | | + for each option there tree of all the option --- - +---- + + +=== Resource +* link:https://wireedit.com/[WireEdit] +* link:https://code.google.com/p/ostinato/[ostinato] +* link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] + + diff --git a/wscript b/wscript index 62ed702b..e8c1ef0d 100755 --- a/wscript +++ b/wscript @@ -185,6 +185,9 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='packet_builder_yaml.asciidoc waf.css', target='packet_builder_yaml.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 9eb5530fb7a07563a1d3bc4fddd7bcc05501c7b4 Mon Sep 17 00:00:00 2001 From: imarom Date: Thu, 3 Sep 2015 05:58:38 +0300 Subject: initial draft of RPC doc --- trex_rpc_server_spec.asciidoc | 189 ++++++++++++++++++++++++++++++++++++++++++ wscript | 4 + 2 files changed, 193 insertions(+) create mode 100644 trex_rpc_server_spec.asciidoc diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc new file mode 100644 index 00000000..13d86c48 --- /dev/null +++ b/trex_rpc_server_spec.asciidoc @@ -0,0 +1,189 @@ +The TRex RPC 
Server +=================== +:author: imarom +:email: +:revnumber: 1.70-0.0 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex + + + +== RPC Support On TRex + +TRex implements a RPC protocol in order to config, view and +in general execute remote calls on TRex + +In this document we will provide information on +how a client can implement the protocol used to communicate with TRex + +In general, we will describe the following: + +* *Transport Layer* - The transport layer used to communicate with TRex server +* *RPC Reprensentation Protocol* - The format in which remote procedures are carried + +=== Transport Layer + +TRex server transport layer is implemented using ZMQ. + +The default configuration is TCP on port 5555, however this is configurable. + +{zwsp} + +The communication model is based on the request-reply ZMQ model: + +http://zguide.zeromq.org/page:all#Ask-and-Ye-Shall-Receive + +{zwsp} + + +for more on ZMQ and implementation please refer to: +{zwsp} + +http://zeromq.org/intro:read-the-manual + +=== RPC Reprensentation Protocol + +The RPC reprensentation protocol is JSON RPC v2.0. +Every request and response will be encoded in a JSON RPC v2.0 format. + +{zwsp} + + +For more info on JSON RPC v2.0 spec please refer to: +{zwsp} + + +http://www.jsonrpc.org/specification + +{zwsp} + + +Later on in the document we will describe all the supported commands. + +=== TRex RPC Mock Server +Before we get into the commands, it's worth mentioning that TRex has a mock RPC server +designed to allow playing around with the server in order to understand the response +and perform adjustments to the request. + +TRex also provides a Python based console that can connect to the server (mock or real) and +send various commands to the server. 
+ +==== Building The Mock Server +Building the mock server is performed like this: +[source,bash] +---- +trex-core/linux> ./b configure +trex-core/linux> ./b --target=mock-rpc-server-64-debug +---- + +==== Running The Mock Server +Launching the mock server is performed like this: + +[source,bash] +---- + +trex-core/scripts> ./mock-rpc-server-64-debug + +-= Starting RPC Server Mock =- + +Listening on tcp://localhost:5050 [ZMQ] + +Setting Server To Full Verbose + +Server Started + +---- + +==== Using The TRex Console To Interact +When the mock server is up, you can already send commands to the server. +{zwsp} + +{zwsp} + + +Let's demonstrate the operation with the Python based TRex console: + +{zwsp} + + +[source,bash] +---- +trex-core/scripts> ./trex-console + +Connecting To RPC Server On tcp://localhost:5050 +[SUCCESS] + + +-=TRex Console V1.0=- + +Type 'help' or '?' for supported actions + +TRex > + +---- +As we will see later on, a basic RPC command supported by the server is 'ping'. +{zwsp} + +Let's issue a ping command to the server and see what happens on both sides: + +{zwsp} + +{zwsp} + + +On the 'client' side: + +[source,bash] +---- +TRex > verbose on + +verbose set to on + +TRex > ping + +-> Pinging RPC server +[verbose] Sending Request To Server: + +{ + "id": "l0tog11a", + "jsonrpc": "2.0", + "method": "ping", + "params": {} +} + +[verbose] Server Response: + +{ + "id": "l0tog11a", + "jsonrpc": "2.0", + "result": "ACK" +} + +[SUCCESS] + +---- +On the 'server' side: + +[source,bash] +---- + +trex-core/scripts> ./mock-rpc-server-64-debug + +-= Starting RPC Server Mock =- + +Listening on tcp://localhost:5050 [ZMQ] + +Setting Server To Full Verbose + +Server Started + + +[verbose][req resp] Server Received: + +{ + "id" : "maa5a3g1", + "jsonrpc" : "2.0", + "method" : "ping", + "params" : {} +} + +[verbose][req resp] Server Replied: + +{ + "id" : "maa5a3g1", + "jsonrpc" : "2.0", + "result" : "ACK" +} + +---- + diff --git a/wscript b/wscript index 
62ed702b..17e976d3 100755 --- a/wscript +++ b/wscript @@ -185,6 +185,10 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) -- cgit 1.2.3-korg From a6c1f99909e96e643e20f62ab536a470c48ff42f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Sep 2015 14:19:11 +0300 Subject: update pkt builder --- .gitignore | 1 + packet_builder_yaml.asciidoc | 386 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 358 insertions(+), 29 deletions(-) diff --git a/.gitignore b/.gitignore index fb87dd11..c58072ed 100644 --- a/.gitignore +++ b/.gitignore @@ -19,5 +19,6 @@ build/ *.rar *.tar *.zip +*.vpj diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 1252e73e..00b69649 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -44,7 +44,7 @@ The format should be *YAML* * ICMPv6 * IGMP -==== L7 +==== L7 anchor:Payload[] * Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) 
** random string @@ -59,43 +59,141 @@ The format should be *YAML* === YAML Format - ==== Header section -It describes objects like TCP/UDP/VLAN/MPLS/IPV6/IPv6 Headers/ICMPv4/ICMPv6/IGMP +.Default Types anchor:Types[] +[options="header",cols="1,2,3"] +|================= +| Field Name | meaning | size in bits +| bit | describe the header object e.g tcp | 1 +| uint8 | describe the header object e.g tcp | 8 +| uint16 | the name in the GUI | 16 +| uint32 | sub fields of this header | 32 +| uint64 | sub fields of this header | 64 +| Field_Type | name of a field type that define | The size of the field type "mac-addr" +| Payload | xref:Payload[Payload] | total packet size - all header until now +| vlen | varible length field, take the reset of the field | total size of the object +|================= + + +.Default Edit_Types anchor:Edit_Types[] +[options="header",cols="1,2"] +|================= +| Field Name | meaning +| none | use Hex Editor as Types +| ipv4_t | should match uint32_t type +| mac_addr_t | 00:00:00:00:00:00 define a regexp here TODO +| ipv4_mask_t| should match uint32_t type +| ipv6_t | should have 16 bytes field size 8x16 +| ipv6_mask_t | should have 16 bytes field size 8x16 +| another header class | sub fields of this header +| char_t | array of bytes , look into the array_size of cost string +| var_char_t | array based on a field value look into +| regexp_t | define a Java function that converts a reg exp string to a buffer see here xref:GenRegExp[RegExp] +|================= + + + + +.Default Exec anchor:Exec[] +[options="header",cols="1,2"] +|================= +| Field Name | meaning +| none | nothing special +| ipv4_checksum | auto calculates checksum on this header Ipv4 type +| tcp_udp_checsum | calculate next TCP checksum +| ipv4_total_length | calculate ipv4 total length +| tlv_length | the length of the field in bytes -1 for TLV +|================= + + +.Field_Type anchor:Field_Type[] +[options="header",cols="^1,^1,30,^1,^1,30"] +|================= 
+| Field Name | value type | meaning | Default Value | Link | Example +| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp +| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | class : tcp +| help | string | the name in the GUI | no | | class : TCP +| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr +| type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" +| edit_type | string | edit_type Edit_Types | "none" | xref:Edit_Types[Edit_Types] | edit_type could get edit_type_regexp e.g edit_type = "ipv4" , edit_type = "regexp" edit_type_regexp = "string that define regexp and Java function" +| edit_type_regexp | string | in case it is reg_exp the name of the function |"none" | xref:GenRegExp[GenRegExp] | +| edian | bool | big or little edian | true | | in default all fields are big +| default | array of bytes | default value in the packets | [0 ]x header size | | +| exec | string | exception processing by java checsum/length | none | xref:Exec[Exec] | +| choice | array | define the next protocol see | none | xref:Choice[Choice] | +| change_possible | bool | is it possible to change this field after we added next fields. 
in some cases we can't change field after we build next protocol | true | | +| fields | array | array of Field_Type | [] | | fields : [ ] +| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | | +| option | string | a java code that define a way to calculate varible size | "none" | | | + +|================= + + +.Choice anchor:Choice[] +Example of Choice +[source,python] +---- + fields : + - key : 0x0800 + val : "ip" + + - key : 0x0860 + val : "ipv6" + + - key : 0x0810 # just an example don't realy remember the numbers + val : "vlan" + + - key : 0x0812 + val : "mpls" + default : [ "payload" ,"ip","tcp","sip"] the number could be any value +---- -Tree line object description + +.Generic RegExp Edit Field anchor:GenRegExp[] + +This will define a regexp that match for user input and how to converts it to buffer of bytes [source,python] ---- - - name : object name ( object ) - - Fields - -- bits or bytes - -- Array size of types (default 1) - -- Type (uint8, uint16_t, uint32_t, uint16_t, string, string_bl) - -- edit_type [ipv4, ipv6, buffer,string , string_nl] - -- edian - can be default as big, - -- Default value (buffer 0x00,0x12,0x13), format- - -- Exception: ipv4_header_size (bitsx44, ipv4_checksum, tcp_udp_checsum, ipv4_total_length) - -- header_size_multi (this field represents the - -- childes_field_list : could be null in default TCP flags is an example of this - -- *Choice* on by one fields [ Sub Tree] example {01 : Object1 , 02: object2 , 03:Object5} - -- Fields that can't change without repeat from the start in the GUI for example ip_ength, protocol , - -- Next protocol ( dict value : name ) - - - PFP -fields - -- ipv4/ipv6 range min-max - random min-max - -- every uint8,uint16_t, uint32_t could be create a range ----- + +class MyClass : public RegExpBase { + public: + + + string get_reg_exp_string( ) { + return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) + } + # in case of match + buffer 
get_buffer(){ + g= [get_group()[1].to_int()*256,get_group()[1].to_int()] + # return list + return (g) + } + +} + +---- + + ==== Relations between object headers -* Represents the order of the headers in the build process. -* It is a tree like relation and could have a loop in the tree (ip inside ip) +There would be a root object to point to possible choice + + +[source,python] +---- + +- class : "root" + help : "Root" + choice : + default : [ "ethrenet" ,"llc","_802-3"] +---- +So in a way you could define a tree like this + [source,python] ---- @@ -113,9 +211,239 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) ---- +==== Rules + +* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) +* It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI +* The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size +* "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) +* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types + for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. +* Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
+ + + +=== Example TCP/IP + + +[source,python] +---- + + - class : "c-mac-addr" + help : "Mac addrees" + type : "uint8" + array_size : 6 + edit_type : "mac-addr_t" # format (\d\d[:]){5}[:]\d\d + default_value : [0x00,0x00,0x01,0x00,0x00,0x00] + + + - class : "c-ethr-l2" + help : "Ethernet-L2" + fields : + - name : "Dst" + help : "destination mac" + type : "c-mac-addr" + + - name : "Src" + help : "source mac" + type : "c-mac-addr" + + - name : "ip_protocol" + type : "uint16_t" + default_value : [0x08,0x00] + choice : + fields : + - key : 0x0800 + val : "ip" + + - key : 0x0860 + val : "ipv6" + + - key : 0x0810 # just an example don't realy remember the numbers + val : "vlan" + + - key : 0x0812 + val : "mpls" + default : "payload" + + + - class : "ipv4" + help : "Ipv4" + fields : + - name : "ver" + help : "Version" + type : "bit" + array_size : 4 + default : [4] + + - name : "ihl" + help : "IHL" + type : "bit" + array_size : 4 + default : [7] + exec : "ipv4_ihl" + change_possible : false + + .. 
+ + - name : "hdr_chsum" + help : "Header Checksum" + default : [0x00,0x00] + exec : "ipv4_check_sum" + change_possible : false + + - name : "total_len" + help : "Total Length" + default : [0x00,0x00] + exec : "ipv4_total_len" # auto calculate total_size-offset_header + + - name : "protocol" + help : "Protocol" + type : uint8_t + default_value : [0x06] + choice : + fields: + - key : 0x06 + val : "tcp" + + - key : 0x11 + val : "udp" + + - key : 0x01 # just an example don't realy remember the numbers + val : "ip" + + - key : 0x0812 + val : "gre" + default : "payload" + + - name : "src_addr" + help : "Source Address" + type : uint32_t + default : [0x10,0x00,0x00,0x00] + edit_type : "ipv4" # reserve + + - name : "dst_addr" + help : "Destination Address" + default : [0x30,0x00,0x00,0x00] + type : uint32_t + edit_type : "ipv4" # reserve + + + - class : "tcp" + help : "TCP" + fields : + - name : "src_port" + help : "Source Port" + default : [0x30,0x00] + type : uint16_t + + - name : "dest_port" + help : "Source Port" + default : [0x30,0x00] + type : uint16_t + + - name : "seq" + help : "Seq Number" + type : uint32_t + default : [0x30,0x00,00,00] + + - name : "ack" + help : "Ack Number" + type : uint32_t + default : [0x30,0x00,00,00] + + ... + + - name : "flags" # tree with leaf of bits + help : "Ack Number" + type : uint8_t + default : [0x30] + fields : + - name : "urg" + help : "URG" + type : bit + default : [0x0] + + - name : "ack" + help : "ACK" + type : bit + default : [0x1] + .. 
+ + - name : "checksum" + help : "Checksum" + type : uint16_t + default : [0x00,0x00] + exec : "tcp_checksum" # auto calculate total_size-offset_header + + +- class : "root" # reserve + help : "Root" + choice : + default : [ "ethrenet" ,"llc","_802-3"] +--------------------------- + +=== Example IP Option + +see here TLV + +0 : END +1 : Length 1 +other : Byte : Length ( +first) |option + + link:http://tools.ietf.org/html/rfc791[ip_option] + + +[source,python] +---- + + - class : "ip_option_131" + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + help : "length" + type : uint8_t + exec : "tlv_length" + + - name : "pointer" # tree with leaf of bits + type : uint8_t + + - name : "buffer" # tree with leaf of bits + type : "tlv_reset" + + + - class : "ip_option" + help : "ip_option" + type : uint8_t + default_value : [0x01] + choice : + fields: + - key : 0x00 + val : "none" # no next the parent has + + - key : 0x01 + val : "ip_option" + + - key : 0x131 + val : "ip_option_131" + + - key : 0x01 # just an example don't realy remember the numbers + val : "ip" + + - key : 0x0812 + val : "gre" + default : "payload" + + +---- + +* case of varible length field ip_option example + + + + === Resource * link:https://wireedit.com/[WireEdit] -* link:https://code.google.com/p/ostinato/[ostinato] +* link:http://ostinato.org/[ostinato] * link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] - -- cgit 1.2.3-korg From fe4d77eabaaf73bbaab7f4432ed45a32e0d43471 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Sep 2015 15:00:31 +0300 Subject: packet_builder first version --- packet_builder_yaml.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 00b69649..1ca45378 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -1,6 +1,9 @@ == A file format for GUI packet builder +version : 0.01 +originator : hhaim + === Introduction We would like a file that 
will be read by GUI and will give us the ability to build packets using GUI -- cgit 1.2.3-korg From a10505930b668ba3dd35717d01d2c3beff6c7afc Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 6 Sep 2015 03:47:58 +0300 Subject: v0.2 packet builder --- packet_builder_yaml.asciidoc | 276 +++++++++++++++++++++++++++++++------------ 1 file changed, 201 insertions(+), 75 deletions(-) diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 1ca45378..f8dd3559 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -1,8 +1,29 @@ +Packet Builder Language +======================= +:author: hhaim +:email: +:revnumber: 0.02 +:quotes.++: +:numbered: + +== change log + +[options="header",cols="1,10"] +|================= +| Version | meaning +| 0.01 | first version +| 0.02 +| + +- change the bool fields to properties +- add external/internal property +- add const property ( instead cant_change) +- change TLV property - now learn the prev header +- add choice that is not base on a field ( TCP->IP->TCP) +|================= -== A file format for GUI packet builder -version : 0.01 -originator : hhaim +== A file format for GUI packet builder === Introduction @@ -75,7 +96,7 @@ The format should be *YAML* | uint64 | sub fields of this header | 64 | Field_Type | name of a field type that define | The size of the field type "mac-addr" | Payload | xref:Payload[Payload] | total packet size - all header until now -| vlen | varible length field, take the reset of the field | total size of the object +| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object |================= @@ -96,22 +117,22 @@ The format should be *YAML* |================= - - -.Default Exec anchor:Exec[] +.Default Properties anchor:Properties[] [options="header",cols="1,2"] |================= | Field Name | meaning -| none | nothing special | ipv4_checksum | auto 
calculates checksum on this header Ipv4 type | tcp_udp_checsum | calculate next TCP checksum -| ipv4_total_length | calculate ipv4 total length -| tlv_length | the length of the field in bytes -1 for TLV +| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet +| tlv | TLV length of the header (inlcudes the prev field length) example ip-option, tcp-option +| le | little edian. deault is big +| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field +| external | marks the header as an external header for the GUI. for example IPv4 is external header and mac-addr is internal header ( compose external header) |================= .Field_Type anchor:Field_Type[] -[options="header",cols="^1,^1,30,^1,^1,30"] +[options="header",cols="1,^1,30,^1,^1,30"] |================= | Field Name | value type | meaning | Default Value | Link | Example | class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp @@ -121,25 +142,64 @@ The format should be *YAML* | type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" | edit_type | string | edit_type Edit_Types | "none" | xref:Edit_Types[Edit_Types] | edit_type could get edit_type_regexp e.g edit_type = "ipv4" , edit_type = "regexp" edit_type_regexp = "string that define regexp and Java function" | edit_type_regexp | string | in case it is reg_exp the name of the function |"none" | xref:GenRegExp[GenRegExp] | -| edian | bool | big or little edian | true | | in default all fields are big -| default | array of bytes | default value in the packets | [0 ]x header size | | -| exec | string | exception processing by java checsum/length | none | xref:Exec[Exec] | -| choice | array | define the next protocol see | none | xref:Choice[Choice] | -| change_possible | bool | is it possible to change this field after we added next fields. 
in some cases we can't change field after we build next protocol | true | | +| default | array of bytes | default value in the packets , you can override value for subfields in parent see example +| [0 ]x header size | | +| properies | array of string like masks +| properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] + +| choice | array | define the next protocol base on a field | none | xref:Choice[Choice] | +| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Choice[Next_headers] | | fields | array | array of Field_Type | [] | | fields : [ ] -| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | | +| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | | option | string | a java code that define a way to calculate varible size | "none" | | | +|================= + +.Field_Type anchor:ConstHeadesClass[] +[options="header",cols="^1,^10"] +|================= +| Field Name | value type +| "root" | the root pointer to the start of blocks L2/802.3 etc +| "end" | end TLV headers +| "payload | the rest of the packets as buffer/string etc |================= + +.Next_headers anchor:Next_headers[] +Example of Next_headers +[source,python] +---- + + - class : "next-example-t-1" + help : "next-example-t-1" + next_headers : ["ip","ipv6,"tcp"] + +# option 1 define in the header itself + - class : "tcp" + help : "TCP header" + properies : ["external"] + next_headers : ["ip","ipv6,"tcp"] + fields : + - name : "ver" + +# option 2 define throw a class + - class : "tcp" + help : "TCP header" + properies : ["external"] + next_headers : "next-example-t-1" # + fields : + - name : "ver" +---- + + .Choice anchor:Choice[] Example of Choice [source,python] ---- fields : - key : 0x0800 - val : "ip" + val : "ip" # name of an external or internal class , the GUI should distinct 
betwean internal and external - key : 0x0860 val : "ipv6" @@ -223,10 +283,81 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) * There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. * Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. +* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) + + +=== Examples +==== TLV (Ip option) anchor:IpvOption[] -=== Example TCP/IP + +IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] + +0 : END +1 : Length 1 +other : Byte : Length ( +first) |option + + + + +[source,python] +---- + + - class : "ip_option_131" + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + help : "length" + type : uint8_t + properties : ["tlv"] # the length include the prev field size (8 byte) + + - name : "pointer" # tree with leaf of bits + type : uint8_t + + - name : "buffer" # tree with leaf of bits + type : "tlv_reset" + + - class : "default_ip4_option_tlv" + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + help : "length" + type : uint8_t + properties : "tlv" # the length include the prev field size (8 byte) + + - name : "buffer" # tree with leaf of bits + type : "vlen_t" + + + - class : "ip_option" + help : "ip_option" + type : uint8_t + default : [0x01] + choice : + fields: + - key : 0x00 + val : "end" # reserve name for ending the loop + + - key : 0x01 + val : "ip_option" # back to this header + + - key : 0x131 + val : "ip_option_131" + + - key : 0x0812 + val : "gre" + + default : "default_ip4_option_tlv" + + +---- + +* case of varible length field ip_option example + + + +==== Example TCP/IP [source,python] @@ -237,7 +368,7 @@ root -> L2 
( Ethernet , 802.3 , LLC SNAP ) type : "uint8" array_size : 6 edit_type : "mac-addr_t" # format (\d\d[:]){5}[:]\d\d - default_value : [0x00,0x00,0x01,0x00,0x00,0x00] + default : [0x00,0x00,0x01,0x00,0x00,0x00] - class : "c-ethr-l2" @@ -253,7 +384,7 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) - name : "ip_protocol" type : "uint16_t" - default_value : [0x08,0x00] + default : [0x08,0x00] choice : fields : - key : 0x0800 @@ -284,26 +415,24 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) type : "bit" array_size : 4 default : [7] - exec : "ipv4_ihl" - change_possible : false + properties : ["ipv4_ihl","const"] .. - name : "hdr_chsum" help : "Header Checksum" default : [0x00,0x00] - exec : "ipv4_check_sum" - change_possible : false + properties : ["ipv4_check_sum"] - name : "total_len" help : "Total Length" default : [0x00,0x00] - exec : "ipv4_total_len" # auto calculate total_size-offset_header + properties : ["ipv4_total_len"] # auto calculate total_size-offset_header - name : "protocol" help : "Protocol" type : uint8_t - default_value : [0x06] + default : [0x06] choice : fields: - key : 0x06 @@ -313,7 +442,7 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) val : "udp" - key : 0x01 # just an example don't realy remember the numbers - val : "ip" + val : "ip" # class name - key : 0x0812 val : "gre" @@ -334,6 +463,7 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) - class : "tcp" help : "TCP" + properties : ["external"] fields : - name : "src_port" help : "Source Port" @@ -377,7 +507,7 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) help : "Checksum" type : uint16_t default : [0x00,0x00] - exec : "tcp_checksum" # auto calculate total_size-offset_header + properties : ["tcp_checksum"] # auto calculate total_size-offset_header - class : "root" # reserve @@ -386,62 +516,58 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) default : [ "ethrenet" ,"llc","_802-3"] --------------------------- -=== Example IP Option - -see here TLV - -0 : END -1 : Length 1 -other : Byte : Length ( +first) 
|option - link:http://tools.ietf.org/html/rfc791[ip_option] +==== Overide subfields +In this example parent class default value override default value of sub-fields ( 2 diffrent mac-addr) [source,python] ---- - - class : "ip_option_131" - help : "ip_option" - fields : - - name : "length" # tree with leaf of bits - help : "length" - type : uint8_t - exec : "tlv_length" - - - name : "pointer" # tree with leaf of bits - type : uint8_t - - - name : "buffer" # tree with leaf of bits - type : "tlv_reset" - - - - class : "ip_option" - help : "ip_option" - type : uint8_t - default_value : [0x01] - choice : - fields: - - key : 0x00 - val : "none" # no next the parent has - - - key : 0x01 - val : "ip_option" - - - key : 0x131 - val : "ip_option_131" - - - key : 0x01 # just an example don't realy remember the numbers - val : "ip" - - - key : 0x0812 - val : "gre" - default : "payload" - + - class : "c-mac-addr" + help : "Mac addrees" + type : "uint8" + array_size : 6 + edit_type : "mac-addr_t" # format (\d\d[:]){5}[:]\d\d + default : [0x00,0x00,0x01,0x00,0x00,0x00] + + - class : "c-ethr-l2" + help : "Ethernet-L2" + properties : ["external"] + default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . 
it is const size + fields : + - name : "Dst" + help : "destination mac" + type : "c-mac-addr" + + - name : "Src" + help : "source mac" + type : "c-mac-addr" + + - name : "ip_protocol" + type : "uint16_t" + default : [0x08,0x00] + choice : + fields : + - key : 0x0800 + val : "ip" + + - key : 0x0860 + val : "ipv6" + + - key : 0x0810 # just an example don't realy remember the numbers + val : "vlan" + + - key : 0x0812 + val : "mpls" + default : "payload" ---- -* case of varible length field ip_option example +==== Union base + +TBD -- cgit 1.2.3-korg From b94a71ceed430025c10e81cadedb59b5958850bd Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 6 Sep 2015 05:03:18 +0300 Subject: minor - fix the table --- packet_builder_yaml.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index f8dd3559..6b840416 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -8,13 +8,13 @@ Packet Builder Language == change log -[options="header",cols="1,10"] +[options="header",cols="^1,^h,a"] |================= -| Version | meaning -| 0.01 | first version -| 0.02 +| Version | meaning | name +| 0.01 | hhaim | +- first version +| 0.02 | hhaim | - - change the bool fields to properties - add external/internal property - add const property ( instead cant_change) -- cgit 1.2.3-korg From 23c179427c659c3d6e067c34136ee4de8352fb1a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 6 Sep 2015 05:11:16 +0300 Subject: another minor change --- packet_builder_yaml.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 6b840416..e545ef06 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -10,7 +10,7 @@ Packet Builder Language [options="header",cols="^1,^h,a"] |================= -| Version | meaning | name +| Version | name | meaning | 0.01 | hhaim | - first version 
| 0.02 | hhaim @@ -164,8 +164,7 @@ The format should be *YAML* | "payload | the rest of the packets as buffer/string etc |================= - - + .Next_headers anchor:Next_headers[] Example of Next_headers [source,python] -- cgit 1.2.3-korg From 255ed8db801d6f15549435743df37283090f33f9 Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 6 Sep 2015 13:17:14 +0300 Subject: added more types to RPC asciidoc --- trex_rpc_server_spec.asciidoc | 166 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 13d86c48..f4b06bb7 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -187,3 +187,169 @@ Server Started ---- +== RPC Commands +The following RPC commands are supported + +=== Ping +* *Name* - 'ping' +* *Description* - Pings the TRex server +* *Paramters* - None +* *Result* - "ACK" On Sucess + +Example: + +[source,bash] +---- +'Request': + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "ping", + "params": null +} + +'Response': + +{ + "jsonrpc" : "2.0", + "id" : 1, + "result" : "ACK" +} + +---- + +=== Get Registered Commands +* *Name* - 'get_reg_cmds' +* *Description* - Queries the server for all the registered commands +* *Paramters* - None +* *Result* - A list of all the supported commands by the server + +Example: + +[source,bash] +---- +'Request': + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "get_reg_cmds", + "params": null +} + + +'Response': + +{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + "remove_all_streams", + "remove_stream", + "add_stream", + "get_reg_cmds", + "ping", + "test_sub", + "get_status", + "test_add" + ] +} + +---- + + +=== Get Status +* *Name* - 'get_status' +* *Description* - Queries the server for general information +e.g.: user owning the device, number of ports configured +* *Paramters* - None +* *Result* - An object of all the supported commands by the server. 
+ ++++Result Details:+++ + +'general.version' - TRex version. + +'general.build_date' - build date. + +'general.build_time' - build time. + +'general.built_by' - who built this version + +'general.uptime' - uptime of the server + +'general.owner' - user currently owning the device or 'none' if no one has taken ownership. + +{zwsp} + +'ports.count' - number of ports available on the server. + +=== Add Stream +* *Name* - 'add_stream' +* *Description* - Adds a stream to a port +* *Paramters* - Object of type 'stream' - see below +* *Result* - ACK in case of success + +==== Object type 'stream' + +Add_stream gets a single parameter of type object. + +The format of that object is as follows: + +.Object type 'stream' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| port_id | int | The port that this stream will be assoiciated with. Valid number is in the range got from get_status : 'ports.count' +| stream_id | int | unique (per port) identifier for this stream +| enabled | boolean | is this stream enabled ? +| self_start | boolean | is this stream triggered by starting injection or triggered by another stream ? +| isg | double | inter stream gap - delay time until the stream is started +| next_stream | int | next stream to start after this stream. -1 means stop after this stream +| packet | object | object of type xref:packet_obj['packet'] +| mode | object | object of type xref:mode_obj['mode'] +| vm | object | object of type xref:vm_obj['vm'] +| rx_stats | object | object of type xref:rx_stats_obj['rx_stats'] +|================= + +===== Object type 'packet' anchor:packet_obj[] +packet contains binary and meta data + +.Object type 'packet' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| binary | byte array | binary dump of the packet to be used in the stream as array of bytes +| meta | string | meta data object. opaque to the RPC server. 
will be passed on queries +|================= + +===== Object type 'mode' anchor:mode_obj[] +mode object can be one of the following objects: + +.Object type 'mode - continuous' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''continuous'' +| pps | int | rate in packets per second +|================= + +.Object type 'mode - single_burst' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''single_burst'' +| pps | int | rate in packets per second +| total pkts | int | total packets in the burst +|================= + +.Object type 'mode - multi_burst' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''multi_burst'' +| pps | int | rate in packets per second +| pkts_per_burst | int | packets in a single burst +| ibg | double | inter burst gap. delay between bursts +| count | int | number of bursts. ''0'' means loop forever, ''1'' will fall back to single burst +|================= + -- cgit 1.2.3-korg From 2f7368b1e4c75b0e1154e34aeebf87218562c2db Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 6 Sep 2015 16:04:02 +0300 Subject: added more commands to the document --- trex_rpc_server_spec.asciidoc | 162 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 152 insertions(+), 10 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index f4b06bb7..6199ff24 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1,6 +1,6 @@ The TRex RPC Server =================== -:author: imarom +:author: Itay Marom :email: :revnumber: 1.70-0.0 :quotes.++: @@ -286,10 +286,13 @@ e.g.: user owning the device, number of ports configured === Add Stream * *Name* - 'add_stream' * *Description* - Adds a stream to a port -* *Paramters* - Object of type 'stream' - see below +* *Paramters* +** *port_id* - port id associated with this stream +** *stream_id* - stream id 
associated with the stream object +** *stream* - object of type xref:stream_obj['stream'] * *Result* - ACK in case of success -==== Object type 'stream' +==== Object type 'stream' anchor:stream_obj[] Add_stream gets a single parameter of type object. @@ -299,11 +302,9 @@ The format of that object is as follows: [options="header",cols="1,1,3"] |================= | Field | Type | Description -| port_id | int | The port that this stream will be assoiciated with. Valid number is in the range got from get_status : 'ports.count' -| stream_id | int | unique (per port) identifier for this stream -| enabled | boolean | is this stream enabled ? -| self_start | boolean | is this stream triggered by starting injection or triggered by another stream ? -| isg | double | inter stream gap - delay time until the stream is started +| enabled | boolean | is this stream enabled +| self_start | boolean | is this stream triggered by starting injection or triggered by another stream +| isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started | next_stream | int | next stream to start after this stream. -1 means stop after this stream | packet | object | object of type xref:packet_obj['packet'] | mode | object | object of type xref:mode_obj['mode'] @@ -323,7 +324,7 @@ packet contains binary and meta data |================= ===== Object type 'mode' anchor:mode_obj[] -mode object can be one of the following objects: +mode object can be 'one' of the following objects: .Object type 'mode - continuous' [options="header",cols="1,1,3"] @@ -349,7 +350,148 @@ mode object can be one of the following objects: | type | string | ''multi_burst'' | pps | int | rate in packets per second | pkts_per_burst | int | packets in a single burst -| ibg | double | inter burst gap. delay between bursts +| ibg | double | ['usec'] inter burst gap. delay between bursts in usec | count | int | number of bursts. 
''0'' means loop forever, ''1'' will fall back to single burst |================= +===== Object type 'vm' anchor:vm_obj[] +Describes the VM instructions to be used with this stream + +.Object type 'vm' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +|================= + +===== Object type 'rx_stats' anchor:rx_stats_obj[] +Describes rx stats for the stream + +.Object type 'rx_stats' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| enabled | boolean | is rx_stats enabled for this stream +| rx_stream_id | int | +| seq_enabled | boolean | should write 32 bit sequence +| latency_enabled | boolean | should write 32 bit latency +|================= + +[source,bash] +---- + +'Request': + +{ + "id": 1, + "jsonrpc": "2.0", + "method": "add_stream", + "params": { + "port_id": 1, + "stream_id": 502 + "stream": { + "enabled": true, + "isg": 4.3, + "mode": { + "pps": 3, + "total_pkts": 5000, + "type": "single_burst" + }, + "next_stream_id": -1, + "packet": { + "binary": [ + 4, + 1, + 255 + ], + "meta": "" + }, + "rx_stats": { + "enabled": false + }, + "self_start": true, + } + } +} + +'Response': + +{ + "id": 1, + "jsonrpc": "2.0", + "result": "ACK" +} + + +---- + + +=== Remove Stream +* *Name* - 'remove_stream' +* *Description* - Removes a stream from a port +* *Paramters* +** *port_id* - port assosicated with the stream. 
+** *stream_id* - stream to remove + +* *Result* - ACK in case of success + +[source,bash] +---- + +'Request': + +{ + "id": 1 + "jsonrpc": "2.0", + "method": "remove_stream", + "params": { + "port_id": 1, + "stream_id": 502 + } +} + + +'Response': + +{ + "id": 1 + "jsonrpc": "2.0", + "result": "ACK" +} + +---- + +=== Get Stream ID List +* *Name* - 'get_stream_list' +* *Description* - fetch all the assoicated streams for a port +* *Paramters* +** *port_id* - port to query for registered streams + +* *Result* - array of 'stream_id' + +[source,bash] +---- + +'Request': + +{ + "id": 1, + "jsonrpc": "2.0", + "method": "get_stream_list", + "params": { + "port_id": 1 + } +} + +'Response': + +{ + "id": 1, + "jsonrpc": "2.0", + "result": [ + 502, + 18 + ] +} + + +---- -- cgit 1.2.3-korg From 718704a561d510cfbb52161d5b0c67a773221bb4 Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 6 Sep 2015 17:58:18 +0300 Subject: more documentation on RPC --- trex_rpc_server_spec.asciidoc | 157 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 6199ff24..ceeb1f9a 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -495,3 +495,160 @@ Describes rx stats for the stream ---- + +=== Get Stream +* *Name* - 'get_stream' +* *Description* - get a specific stream object +* *Paramters* +** *port_id* - port for the associated stream +** *stream_id* - the requested stream id + +* *Result* - object xref:stream_obj['stream'] + +[source,bash] +---- + +'Request': + +{ + "id": 1, + "jsonrpc": "2.0", + "method": "get_stream", + "params": { + "port_id": 1, + "stream_id": 7 + } +} + + +'Response': + +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "stream": { + "enabled": true, + "isg": 4.3, + "mode": { + "pps": 3, + "type": "continuous" + }, + "next_stream_id": -1, + "packet": { + "binary": [ + 4, + 1, + 255 + ], + "meta": "" + }, + "self_start": true + } + } +} + +---- + + 
+=== Remove All Streams +* *Name* - 'remove_all_streams' +* *Description* - remove all streams from a port +* *Paramters* +** *port_id* - port for the associated stream + +* *Result* - "ACK" on success + + +[source,bash] +---- + +'Request': + +{ + "id": 1, + "jsonrpc": "2.0", + "method": "remove_all_streams", + "params": { + "port_id": 2 + } +} + +'Response': + +{ + "id": 1, + "jsonrpc": "2.0", + "result": "ACK" +} + + +---- + + +=== Start Traffic +* *Name* - 'start_traffic' +* *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned +* *Paramters* +** *port_id* - port for starting the traffic + +* *Result* - "ACK" on success + +[source,bash] +---- + +'Request': + +{ + "id": "b3llt8hs", + "jsonrpc": "2.0", + "method": "start_traffic", + "params": { + "port_id": 3 + } + +'Response': + +{ + "id": "b3llt8hs", + "jsonrpc": "2.0", + "result": "ACK" +} + + +---- + +=== Stop Traffic +* *Name* - 'stop_traffic' +* *Description* - Stops the traffic on a specific port. 
if the port has already started nothing will happen +* *Paramters* +** *port_id* - port for stopping the traffic + +* *Result* - "ACK" on success + +[source,bash] +---- + +'Request': + +{ + "id": "h2fyhni7", + "jsonrpc": "2.0", + "method": "stop_traffic", + "params": { + "port_id": 3 + } +} + +'Response': + +{ + "id": "h2fyhni7", + "jsonrpc": "2.0", + "result": "ACK" +} + + +---- + + -- cgit 1.2.3-korg From 12c88095472fff2ac6372676585fc5604b0f9fec Mon Sep 17 00:00:00 2001 From: imarom Date: Tue, 8 Sep 2015 02:11:15 +0300 Subject: VM objects --- trex_rpc_server_spec.asciidoc | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index ceeb1f9a..aeff5dce 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -308,7 +308,7 @@ The format of that object is as follows: | next_stream | int | next stream to start after this stream. -1 means stop after this stream | packet | object | object of type xref:packet_obj['packet'] | mode | object | object of type xref:mode_obj['mode'] -| vm | object | object of type xref:vm_obj['vm'] +| vm | array | array of objects of type xref:vm_obj['vm'] | rx_stats | object | object of type xref:rx_stats_obj['rx_stats'] |================= @@ -355,14 +355,43 @@ mode object can be 'one' of the following objects: |================= ===== Object type 'vm' anchor:vm_obj[] -Describes the VM instructions to be used with this stream +Array of VM instruction objects to be used with this stream -.Object type 'vm' +Any element in the array can be one of the following object types: + +.Object type 'vm - fix_checksum_ipv4' [options="header",cols="1,1,3"] |================= | Field | Type | Description +| type | string | ''fix_checksum_ipv4'' +| pkt_offset | uint16 | offset of the field to fix +|================= + +.Object type 'vm - flow_var' +[options="header",cols="1,1,3"] +|================= +| Field | Type | 
Description +| type | string | ''flow_var''' +| name | string | flow var name - this should be a unique identifier +| size | [1,2,4,8] | size of the flow var in bytes +| op | ['inc', 'dec', 'random'] | operation type to perform on the field +| init value | uint64_t as string | init value for the field +| min value | uint64_t as string | minimum value for the field +| max value | uint64_t as string | maximum value for the field |================= +.Object type 'vm - write_flow_var' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''write_flow_var'' +| name | string | flow var name to write +| pkt_offset | uint16 | offset at the packet to perform the write +| add_value | int | delta to add to the field prior to writing - can be negative +| is_big_endian | boolean | should write as big endian or little +|================= + + ===== Object type 'rx_stats' anchor:rx_stats_obj[] Describes rx stats for the stream -- cgit 1.2.3-korg From 8436bbe899bdf9e3e58661f94602e29f67cca213 Mon Sep 17 00:00:00 2001 From: imarom Date: Wed, 9 Sep 2015 18:01:49 +0300 Subject: another version --- images/rpc_states.png | Bin 0 -> 24137 bytes trex_rpc_server_spec.asciidoc | 216 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 196 insertions(+), 20 deletions(-) create mode 100644 images/rpc_states.png diff --git a/images/rpc_states.png b/images/rpc_states.png new file mode 100644 index 00000000..57c0ac17 Binary files /dev/null and b/images/rpc_states.png differ diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index aeff5dce..58f9fea1 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -187,11 +187,32 @@ Server Started ---- +== RPC Server State Machine +The RPC server can be in numbered of states, each state provides other subset of the commands +that are allowed to be executed. 
+ +We define the following possible states: + +* *unowned* - The server is either unowned or another user is owning the device +* *owned* - The server has been acquired by the client +* *active* - The server is in the middle of injecting traffic - currently active + +Each command will specify on which states it is possible to execute it. + +For commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST' be passed +along with the rest of the parameters. + + +This will identify the connection. + +image::images/rpc_states.png[title="RPC Server States",align="left",width=200, link="images/rpc_states.png"] + == RPC Commands The following RPC commands are supported === Ping * *Name* - 'ping' +* *Valid States* - 'all' * *Description* - Pings the TRex server * *Paramters* - None * *Result* - "ACK" On Sucess @@ -221,6 +242,7 @@ Example: === Get Registered Commands * *Name* - 'get_reg_cmds' +* *Valid States* - 'all' * *Description* - Queries the server for all the registered commands * *Paramters* - None * *Result* - A list of all the supported commands by the server @@ -251,7 +273,7 @@ Example: "get_reg_cmds", "ping", "test_sub", - "get_status", + "get_version", "test_add" ] } @@ -259,32 +281,154 @@ Example: ---- -=== Get Status -* *Name* - 'get_status' -* *Description* - Queries the server for general information -e.g.: user owning the device, number of ports configured +=== Get Version +* *Name* - 'get_version' +* *Valid States* - 'all' +* *Description* - Queries the server for version information * *Paramters* - None -* *Result* - An object of all the supported commands by the server. 
+* *Result* - See table below + +.Object type 'return values for get_version' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| version | string | TRex version +| build_date | string | build date +| build_time | string | build time +| built_by | string | who built this version +|================= -+++Result Details:+++ +=== Get System Info +* *Name* - 'get_system_info' +* *Description* - Queries the server for system properties +* *Paramters* - None +* *Result* - See table below -'general.version' - TRex version. +.return value: 'get_system_info' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| dp_core_count | int | DP core count +| core_type | string | DP core type +| hostname | string | machine host name +| ip | string | machine IP +| uptime | string | uptime of the server +| port_count | int | number of ports on the machine +| ports | array | arary of object ''port'' - see below +|================= -'general.build_date' - build date. +.return value: 'get_system_info'.'port' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| driver | string | driver type +| speed | string | speed of the port (1g, 10g, 40g, 100g) +| status | string | up / down +|================= -'general.build_time' - build time. -'general.built_by' - who built this version +=== Get Owner +* *Name* - 'get_owner' +* *Valid States* - 'all' +* *Description* - Queries the server for current owner +* *Paramters* - None +* *Result* ['string'] - owner name if exists, otherwise 'none' -'general.uptime' - uptime of the server +[source,bash] +---- -'general.owner' - user currently owning the device or 'none' if no one has taken ownership. 
+'Request': + +{ + "id": "hxjkuwj9", + "jsonrpc": "2.0", + "method": "get_owner", + "params": null +} + +'Response': + +{ + "id": "hxjkuwj9", + "jsonrpc": "2.0", + "result": { + "owner": "itay" + } +} + +---- + +=== Acquire +* *Name* - 'Acquire' +* *Valid States* - 'all' +* *Description* - Takes ownership on the device. +* *Paramters* - +** *user* ['string'] - User name aquiring the system +** *force* ['boolean'] - force action even if another user is holding the device +* *Result* ['string'] - 'unique' identifier for future requests + +[source,bash] +---- + +'Request': + +{ + "id": "b1tr56yz", + "jsonrpc": "2.0", + "method": "Acquire", + "params": { + "force": false, + "user": "itay" + } +} + + +'Response': + +{ + "id": "b1tr56yz", + "jsonrpc": "2.0", + "result": "4cBWDxS2" +} +---- + + +=== Release +* *Name* - 'release' +* *Valid States* - 'owned' +* *Description* - Release owernship over the device +* *Paramters* - +** *handler* ['string'] - unique identifier for the message +* *Result* ['string'] - "ACK" on success + +[source,bash] +---- + +'Request': + +{ + "id": "m785dxwd", + "jsonrpc": "2.0", + "method": "release", + "params": { + "handler": "37JncCHr" + } +} + + +'Response': + +{ + "id": "m785dxwd", + "jsonrpc": "2.0", + "result": "ACK" +} +---- -{zwsp} + -'ports.count' - number of ports available on the server. 
=== Add Stream * *Name* - 'add_stream' +* *Valid States* - 'owned' * *Description* - Adds a stream to a port * *Paramters* ** *port_id* - port id associated with this stream @@ -331,7 +475,7 @@ mode object can be 'one' of the following objects: |================= | Field | Type | Description | type | string | ''continuous'' -| pps | int | rate in packets per second +| pps | double | rate in packets per second |================= .Object type 'mode - single_burst' @@ -339,7 +483,7 @@ mode object can be 'one' of the following objects: |================= | Field | Type | Description | type | string | ''single_burst'' -| pps | int | rate in packets per second +| pps | double | rate in packets per second | total pkts | int | total packets in the burst |================= @@ -395,12 +539,38 @@ Any element in the array can be one of the following object types: ===== Object type 'rx_stats' anchor:rx_stats_obj[] Describes rx stats for the stream +{zwsp} + + +'IMPORTANT': + +In case rx_stats is enabled, meta data will be written in the end of the packet. + +please consider the following: + +==== Constrains +* *performance* - this will have performance impact as rx packets will be examined +* *override* - up to 10 bytes at the end of the packet will be overidden by the meta data required + +==== The bytes needed for activating 'rx_stats': + +* *rx_stream_id* consumes 2 bytes +* *seq_enabled* consumes 4 bytes +* *latency_enabled* consumes 4 bytes + +so if no seq or latency are enabled 2 bytes will be used. + + +if seq or latency alone are enabled, 6 bytes will be used. + + +if both are enabled then 10 bytes will be used. 
+ + .Object type 'rx_stats' [options="header",cols="1,1,3"] |================= | Field | Type | Description | enabled | boolean | is rx_stats enabled for this stream -| rx_stream_id | int | | seq_enabled | boolean | should write 32 bit sequence | latency_enabled | boolean | should write 32 bit latency |================= @@ -456,6 +626,7 @@ Describes rx stats for the stream === Remove Stream * *Name* - 'remove_stream' +* *Valid States* - 'owned' * *Description* - Removes a stream from a port * *Paramters* ** *port_id* - port assosicated with the stream. @@ -491,6 +662,7 @@ Describes rx stats for the stream === Get Stream ID List * *Name* - 'get_stream_list' +* *Valid States* - 'owned', 'active' * *Description* - fetch all the assoicated streams for a port * *Paramters* ** *port_id* - port to query for registered streams @@ -527,6 +699,7 @@ Describes rx stats for the stream === Get Stream * *Name* - 'get_stream' +* *Valid States* - 'owned', 'active' * *Description* - get a specific stream object * *Paramters* ** *port_id* - port for the associated stream @@ -582,6 +755,7 @@ Describes rx stats for the stream === Remove All Streams * *Name* - 'remove_all_streams' +* *Valid States* - 'owned' * *Description* - remove all streams from a port * *Paramters* ** *port_id* - port for the associated stream @@ -617,9 +791,10 @@ Describes rx stats for the stream === Start Traffic * *Name* - 'start_traffic' +* *Valid States* - 'owned' * *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned * *Paramters* -** *port_id* - port for starting the traffic +** *port_id* - port for starting the traffic, -1 for starting all the ports * *Result* - "ACK" on success @@ -649,9 +824,10 @@ Describes rx stats for the stream === Stop Traffic * *Name* - 'stop_traffic' +* *Valid States* - 'active' * *Description* - Stops the traffic on a specific port. 
if the port has already started nothing will happen * *Paramters* -** *port_id* - port for stopping the traffic +** *port_id* - port for stopping the traffic, -1 for stopping all the ports * *Result* - "ACK" on success -- cgit 1.2.3-korg From 75132c65c2927edec9b6beaccf2224561af5bb64 Mon Sep 17 00:00:00 2001 From: imarom Date: Wed, 9 Sep 2015 20:06:50 -0400 Subject: added handler for every 'owned' or 'active' command changed start_traffic / stop_traffic to have array of port id --- trex_rpc_server_spec.asciidoc | 68 ++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 58f9fea1..95e39820 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -138,7 +138,7 @@ TRex > ping "id": "l0tog11a", "jsonrpc": "2.0", "method": "ping", - "params": {} + "params": null } [verbose] Server Response: @@ -174,7 +174,7 @@ Server Started "id" : "maa5a3g1", "jsonrpc" : "2.0", "method" : "ping", - "params" : {} + "params" : null } [verbose][req resp] Server Replied: @@ -215,7 +215,7 @@ The following RPC commands are supported * *Valid States* - 'all' * *Description* - Pings the TRex server * *Paramters* - None -* *Result* - "ACK" On Sucess +* *Result* ['string'] - "ACK" On Sucess Example: @@ -245,7 +245,7 @@ Example: * *Valid States* - 'all' * *Description* - Queries the server for all the registered commands * *Paramters* - None -* *Result* - A list of all the supported commands by the server +* *Result* ['array'] - A list of all the supported commands by the server Example: @@ -286,7 +286,7 @@ Example: * *Valid States* - 'all' * *Description* - Queries the server for version information * *Paramters* - None -* *Result* - See table below +* *Result* ['object'] - See table below .Object type 'return values for get_version' [options="header",cols="1,1,3"] @@ -302,7 +302,7 @@ Example: * *Name* - 'get_system_info' * *Description* - Queries the 
server for system properties * *Paramters* - None -* *Result* - See table below +* *Result* ['object'] - See table below .return value: 'get_system_info' [options="header",cols="1,1,3"] @@ -365,7 +365,7 @@ Example: * *Paramters* - ** *user* ['string'] - User name aquiring the system ** *force* ['boolean'] - force action even if another user is holding the device -* *Result* ['string'] - 'unique' identifier for future requests +* *Result* ['string'] - 'unique' connection handler for future requests [source,bash] ---- @@ -398,7 +398,7 @@ Example: * *Valid States* - 'owned' * *Description* - Release owernship over the device * *Paramters* - -** *handler* ['string'] - unique identifier for the message +** *handler* ['string'] - unique connection handler * *Result* ['string'] - "ACK" on success [source,bash] @@ -431,10 +431,11 @@ Example: * *Valid States* - 'owned' * *Description* - Adds a stream to a port * *Paramters* -** *port_id* - port id associated with this stream -** *stream_id* - stream id associated with the stream object +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port id associated with this stream +** *stream_id* ['int'] - stream id associated with the stream object ** *stream* - object of type xref:stream_obj['stream'] -* *Result* - ACK in case of success +* *Result* ['string'] - "ACK" in case of success ==== Object type 'stream' anchor:stream_obj[] @@ -585,6 +586,7 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "add_stream", "params": { + "handler": "37JncCHr", "port_id": 1, "stream_id": 502 "stream": { @@ -629,10 +631,11 @@ if both are enabled then 10 bytes will be used. * *Valid States* - 'owned' * *Description* - Removes a stream from a port * *Paramters* -** *port_id* - port assosicated with the stream. -** *stream_id* - stream to remove +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port assosicated with the stream. 
+** *stream_id* ['int'] - stream to remove -* *Result* - ACK in case of success +* *Result* ['string'] - "ACK" in case of success [source,bash] ---- @@ -644,6 +647,7 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "remove_stream", "params": { + "handler": "37JncCHr", "port_id": 1, "stream_id": 502 } @@ -665,9 +669,10 @@ if both are enabled then 10 bytes will be used. * *Valid States* - 'owned', 'active' * *Description* - fetch all the assoicated streams for a port * *Paramters* -** *port_id* - port to query for registered streams +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port to query for registered streams -* *Result* - array of 'stream_id' +* *Result* ['array'] - array of 'stream_id' [source,bash] ---- @@ -679,6 +684,7 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "get_stream_list", "params": { + "handler": "37JncCHr", "port_id": 1 } } @@ -702,10 +708,11 @@ if both are enabled then 10 bytes will be used. * *Valid States* - 'owned', 'active' * *Description* - get a specific stream object * *Paramters* -** *port_id* - port for the associated stream -** *stream_id* - the requested stream id +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port for the associated stream +** *stream_id* ['int'] - the requested stream id -* *Result* - object xref:stream_obj['stream'] +* *Result* ['object'] - object xref:stream_obj['stream'] [source,bash] ---- @@ -717,6 +724,7 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "get_stream", "params": { + "handler": "37JncCHr", "port_id": 1, "stream_id": 7 } @@ -758,9 +766,10 @@ if both are enabled then 10 bytes will be used. 
* *Valid States* - 'owned' * *Description* - remove all streams from a port * *Paramters* -** *port_id* - port for the associated stream +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port for the associated stream -* *Result* - "ACK" on success +* *Result* ['string'] - "ACK" on success [source,bash] @@ -773,6 +782,7 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "remove_all_streams", "params": { + "handler": "37JncCHr", "port_id": 2 } } @@ -794,9 +804,10 @@ if both are enabled then 10 bytes will be used. * *Valid States* - 'owned' * *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned * *Paramters* -** *port_id* - port for starting the traffic, -1 for starting all the ports +** *handler* ['string'] - unique connection handler +** *port_id* ['array'] - array of port id on which to start traffic -* *Result* - "ACK" on success +* *Result* ['string'] - "ACK" on success [source,bash] ---- @@ -808,7 +819,8 @@ if both are enabled then 10 bytes will be used. "jsonrpc": "2.0", "method": "start_traffic", "params": { - "port_id": 3 + "handler": "37JncCHr", + "port_id": [3, 4] } 'Response': @@ -827,9 +839,10 @@ if both are enabled then 10 bytes will be used. * *Valid States* - 'active' * *Description* - Stops the traffic on a specific port. if the port has already started nothing will happen * *Paramters* -** *port_id* - port for stopping the traffic, -1 for stopping all the ports +** *handler* ['string'] - unique connection handler +** *port_id* ['array'] - array of port id on which to stop traffic -* *Result* - "ACK" on success +* *Result* ['string'] - "ACK" on success [source,bash] ---- @@ -841,7 +854,8 @@ if both are enabled then 10 bytes will be used. 
"jsonrpc": "2.0", "method": "stop_traffic", "params": { - "port_id": 3 + "handler": "37JncCHr", + "port_id": [3, 4] } } -- cgit 1.2.3-korg From 1296b571afbb5fa843fff152016bdb5480ca3c02 Mon Sep 17 00:00:00 2001 From: imarom Date: Wed, 9 Sep 2015 21:54:32 -0400 Subject: added Dan's illustration picture --- images/rpc_server_big_picture.png | Bin 0 -> 54566 bytes trex_rpc_server_spec.asciidoc | 6 ++++++ 2 files changed, 6 insertions(+) create mode 100644 images/rpc_server_big_picture.png diff --git a/images/rpc_server_big_picture.png b/images/rpc_server_big_picture.png new file mode 100644 index 00000000..dae6976d Binary files /dev/null and b/images/rpc_server_big_picture.png differ diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 95e39820..6f3e21e9 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -187,6 +187,12 @@ Server Started ---- +== RPC Server Component Position Illustration + +The following diagram illustres the RPC server component's place: + +image::images/rpc_server_big_picture.png[title="RPC Server Position",align="left",width=800, link="images/rpc_server_big_picture.png"] + == RPC Server State Machine The RPC server can be in numbered of states, each state provides other subset of the commands that are allowed to be executed. 
-- cgit 1.2.3-korg From 76ccb03154e6adeaf24c0fbab9e3a316d7d35575 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 10 Sep 2015 07:33:23 +0300 Subject: packet_builder_yaml.asciidoc version 1.03 --- images/checkbox.jpg | Bin 0 -> 2653 bytes images/combo_button_choosing.jpg | Bin 0 -> 17359 bytes images/combo_button_editing.jpg | Bin 0 -> 3029 bytes packet_builder_yaml.asciidoc | 1259 +++++++++++++++++++++----------------- 4 files changed, 682 insertions(+), 577 deletions(-) create mode 100755 images/checkbox.jpg create mode 100755 images/combo_button_choosing.jpg create mode 100755 images/combo_button_editing.jpg diff --git a/images/checkbox.jpg b/images/checkbox.jpg new file mode 100755 index 00000000..38fa30e1 Binary files /dev/null and b/images/checkbox.jpg differ diff --git a/images/combo_button_choosing.jpg b/images/combo_button_choosing.jpg new file mode 100755 index 00000000..11483ed2 Binary files /dev/null and b/images/combo_button_choosing.jpg differ diff --git a/images/combo_button_editing.jpg b/images/combo_button_editing.jpg new file mode 100755 index 00000000..ff8d3c49 Binary files /dev/null and b/images/combo_button_editing.jpg differ diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index e545ef06..aefbe1c7 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -1,577 +1,682 @@ -Packet Builder Language -======================= -:author: hhaim -:email: -:revnumber: 0.02 -:quotes.++: -:numbered: - -== change log - -[options="header",cols="^1,^h,a"] -|================= -| Version | name | meaning -| 0.01 | hhaim | -- first version -| 0.02 | hhaim -| -- change the bool fields to properties -- add external/internal property -- add const property ( instead cant_change) -- change TLV property - now learn the prev header -- add choice that is not base on a field ( TCP->IP->TCP) -|================= - - -== A file format for GUI packet builder - -=== Introduction - -We would like a file that will be read 
by GUI and will give us the ability to build packets using GUI - -The format should be *YAML* - - -=== High Level Requirement - -* Define a YAML object format for dynamic building of packets and a program that change various fields -* Ability to *parse* back the same buffer that was created using this tool (reversibility) -** Ability to load packet from a pcap file and parse it -* Ability to save the packet to a pcap file -* Ability to save the packet and program in JSON format (same JSON-RPC format) -* Set a value for any field of any protocol -* Vary packet fields across packets at run time e.g. changing IP/MAC addresses -* Stack protocols in any arbitrary order define in YAML format - -=== Header that should be supported (first phase) - -==== L2 - -* Ethernet -* 802.3 -* LLC SNAP -*VLAN (with QinQ) stack -*MPLS stack - -==== L3 - -* ARP -* IPv4 -* IPv6 (4x header) -* IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) - -==== L4 - -* TCP -* UDP -* ICMPv4 -* ICMPv6 -* IGMP - -==== L7 anchor:Payload[] - -* Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) 
-** random string -** repeat string - -* Pattern Binary -** repeat of value (e.g 0x55) -** random -** seq (1,2,3,3,4) -** User Hex Dump editor - - -=== YAML Format - -==== Header section - -.Default Types anchor:Types[] -[options="header",cols="1,2,3"] -|================= -| Field Name | meaning | size in bits -| bit | describe the header object e.g tcp | 1 -| uint8 | describe the header object e.g tcp | 8 -| uint16 | the name in the GUI | 16 -| uint32 | sub fields of this header | 32 -| uint64 | sub fields of this header | 64 -| Field_Type | name of a field type that define | The size of the field type "mac-addr" -| Payload | xref:Payload[Payload] | total packet size - all header until now -| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object -|================= - - -.Default Edit_Types anchor:Edit_Types[] -[options="header",cols="1,2"] -|================= -| Field Name | meaning -| none | use Hex Editor as Types -| ipv4_t | should match uint32_t type -| mac_addr_t | 00:00:00:00:00:00 define a regexp here TODO -| ipv4_mask_t| should match uint32_t type -| ipv6_t | should have 16 bytes field size 8x16 -| ipv6_mask_t | should have 16 bytes field size 8x16 -| another header class | sub fields of this header -| char_t | array of bytes , look into the array_size of cost string -| var_char_t | array based on a field value look into -| regexp_t | define a Java function that converts a reg exp string to a buffer see here xref:GenRegExp[RegExp] -|================= - - -.Default Properties anchor:Properties[] -[options="header",cols="1,2"] -|================= -| Field Name | meaning -| ipv4_checksum | auto calculates checksum on this header Ipv4 type -| tcp_udp_checsum | calculate next TCP checksum -| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet -| tlv | TLV length of the header (inlcudes the prev field length) example 
ip-option, tcp-option -| le | little edian. deault is big -| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field -| external | marks the header as an external header for the GUI. for example IPv4 is external header and mac-addr is internal header ( compose external header) -|================= - - -.Field_Type anchor:Field_Type[] -[options="header",cols="1,^1,30,^1,^1,30"] -|================= -| Field Name | value type | meaning | Default Value | Link | Example -| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp -| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | class : tcp -| help | string | the name in the GUI | no | | class : TCP -| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr -| type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" -| edit_type | string | edit_type Edit_Types | "none" | xref:Edit_Types[Edit_Types] | edit_type could get edit_type_regexp e.g edit_type = "ipv4" , edit_type = "regexp" edit_type_regexp = "string that define regexp and Java function" -| edit_type_regexp | string | in case it is reg_exp the name of the function |"none" | xref:GenRegExp[GenRegExp] | -| default | array of bytes | default value in the packets , you can override value for subfields in parent see example -| [0 ]x header size | | -| properies | array of string like masks -| properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] - -| choice | array | define the next protocol base on a field | none | xref:Choice[Choice] | -| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Choice[Next_headers] | -| fields | array | array of Field_Type | [] | | fields : [ ] 
-| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | -| option | string | a java code that define a way to calculate varible size | "none" | | | -|================= - - -.Field_Type anchor:ConstHeadesClass[] -[options="header",cols="^1,^10"] -|================= -| Field Name | value type -| "root" | the root pointer to the start of blocks L2/802.3 etc -| "end" | end TLV headers -| "payload | the rest of the packets as buffer/string etc -|================= - - -.Next_headers anchor:Next_headers[] -Example of Next_headers -[source,python] ----- - - - class : "next-example-t-1" - help : "next-example-t-1" - next_headers : ["ip","ipv6,"tcp"] - -# option 1 define in the header itself - - class : "tcp" - help : "TCP header" - properies : ["external"] - next_headers : ["ip","ipv6,"tcp"] - fields : - - name : "ver" - -# option 2 define throw a class - - class : "tcp" - help : "TCP header" - properies : ["external"] - next_headers : "next-example-t-1" # - fields : - - name : "ver" ----- - - -.Choice anchor:Choice[] -Example of Choice -[source,python] ----- - fields : - - key : 0x0800 - val : "ip" # name of an external or internal class , the GUI should distinct betwean internal and external - - - key : 0x0860 - val : "ipv6" - - - key : 0x0810 # just an example don't realy remember the numbers - val : "vlan" - - - key : 0x0812 - val : "mpls" - default : [ "payload" ,"ip","tcp","sip"] the number could be any value ----- - - -.Generic RegExp Edit Field anchor:GenRegExp[] - -This will define a regexp that match for user input and how to converts it to buffer of bytes - -[source,python] ----- - -class MyClass : public RegExpBase { - public: - - - string get_reg_exp_string( ) { - return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) - } - - # in case of match - buffer get_buffer(){ - g= [get_group()[1].to_int()*256,get_group()[1].to_int()] - # return list - return (g) - } - -} - ----- - - - -==== Relations 
between object headers - -There would be a root object to point to possible choice - - -[source,python] ----- - -- class : "root" - help : "Root" - choice : - default : [ "ethrenet" ,"llc","_802-3"] ----- - -So in a way you could define a tree like this - -[source,python] ----- - -root -> L2 ( Ethernet , 802.3 , LLC SNAP ) - |( by field ) - | - ------------------------------------- ( VLAN (with QinQ), MPLS , ipv4, ipv6, ARP , ICMP ) - | | | | - | ipv4/ipv6 - - - | | - | | - [Possibility - Ethernet/802.3/LLC SNAP) | UDP/TCP/Pyload - Object | | - for each option there tree of all the option --- - ----- - - -==== Rules - -* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) -* It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI -* The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size -* "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) -* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types - for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. -* Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
-* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) - - -=== Examples - - -==== TLV (Ip option) anchor:IpvOption[] - - -IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] - -0 : END -1 : Length 1 -other : Byte : Length ( +first) |option - - - - -[source,python] ----- - - - class : "ip_option_131" - help : "ip_option" - fields : - - name : "length" # tree with leaf of bits - help : "length" - type : uint8_t - properties : ["tlv"] # the length include the prev field size (8 byte) - - - name : "pointer" # tree with leaf of bits - type : uint8_t - - - name : "buffer" # tree with leaf of bits - type : "tlv_reset" - - - class : "default_ip4_option_tlv" - help : "ip_option" - fields : - - name : "length" # tree with leaf of bits - help : "length" - type : uint8_t - properties : "tlv" # the length include the prev field size (8 byte) - - - name : "buffer" # tree with leaf of bits - type : "vlen_t" - - - - class : "ip_option" - help : "ip_option" - type : uint8_t - default : [0x01] - choice : - fields: - - key : 0x00 - val : "end" # reserve name for ending the loop - - - key : 0x01 - val : "ip_option" # back to this header - - - key : 0x131 - val : "ip_option_131" - - - key : 0x0812 - val : "gre" - - default : "default_ip4_option_tlv" - - ----- - -* case of varible length field ip_option example - - - -==== Example TCP/IP - - -[source,python] ----- - - - class : "c-mac-addr" - help : "Mac addrees" - type : "uint8" - array_size : 6 - edit_type : "mac-addr_t" # format (\d\d[:]){5}[:]\d\d - default : [0x00,0x00,0x01,0x00,0x00,0x00] - - - - class : "c-ethr-l2" - help : "Ethernet-L2" - fields : - - name : "Dst" - help : "destination mac" - type : "c-mac-addr" - - - name : "Src" - help : "source mac" - type : "c-mac-addr" - - - name : "ip_protocol" - type : "uint16_t" - default : [0x08,0x00] - choice : - fields : - - key : 0x0800 - val : "ip" - - - key : 0x0860 - val : "ipv6" - - - 
key : 0x0810 # just an example don't realy remember the numbers - val : "vlan" - - - key : 0x0812 - val : "mpls" - default : "payload" - - - - class : "ipv4" - help : "Ipv4" - fields : - - name : "ver" - help : "Version" - type : "bit" - array_size : 4 - default : [4] - - - name : "ihl" - help : "IHL" - type : "bit" - array_size : 4 - default : [7] - properties : ["ipv4_ihl","const"] - - .. - - - name : "hdr_chsum" - help : "Header Checksum" - default : [0x00,0x00] - properties : ["ipv4_check_sum"] - - - name : "total_len" - help : "Total Length" - default : [0x00,0x00] - properties : ["ipv4_total_len"] # auto calculate total_size-offset_header - - - name : "protocol" - help : "Protocol" - type : uint8_t - default : [0x06] - choice : - fields: - - key : 0x06 - val : "tcp" - - - key : 0x11 - val : "udp" - - - key : 0x01 # just an example don't realy remember the numbers - val : "ip" # class name - - - key : 0x0812 - val : "gre" - default : "payload" - - - name : "src_addr" - help : "Source Address" - type : uint32_t - default : [0x10,0x00,0x00,0x00] - edit_type : "ipv4" # reserve - - - name : "dst_addr" - help : "Destination Address" - default : [0x30,0x00,0x00,0x00] - type : uint32_t - edit_type : "ipv4" # reserve - - - - class : "tcp" - help : "TCP" - properties : ["external"] - fields : - - name : "src_port" - help : "Source Port" - default : [0x30,0x00] - type : uint16_t - - - name : "dest_port" - help : "Source Port" - default : [0x30,0x00] - type : uint16_t - - - name : "seq" - help : "Seq Number" - type : uint32_t - default : [0x30,0x00,00,00] - - - name : "ack" - help : "Ack Number" - type : uint32_t - default : [0x30,0x00,00,00] - - ... - - - name : "flags" # tree with leaf of bits - help : "Ack Number" - type : uint8_t - default : [0x30] - fields : - - name : "urg" - help : "URG" - type : bit - default : [0x0] - - - name : "ack" - help : "ACK" - type : bit - default : [0x1] - .. 
- - - name : "checksum" - help : "Checksum" - type : uint16_t - default : [0x00,0x00] - properties : ["tcp_checksum"] # auto calculate total_size-offset_header - - -- class : "root" # reserve - help : "Root" - choice : - default : [ "ethrenet" ,"llc","_802-3"] ---------------------------- - - -==== Overide subfields - -In this example parent class default value override default value of sub-fields ( 2 diffrent mac-addr) - -[source,python] ----- - - - class : "c-mac-addr" - help : "Mac addrees" - type : "uint8" - array_size : 6 - edit_type : "mac-addr_t" # format (\d\d[:]){5}[:]\d\d - default : [0x00,0x00,0x01,0x00,0x00,0x00] - - - - class : "c-ethr-l2" - help : "Ethernet-L2" - properties : ["external"] - default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . it is const size - fields : - - name : "Dst" - help : "destination mac" - type : "c-mac-addr" - - - name : "Src" - help : "source mac" - type : "c-mac-addr" - - - name : "ip_protocol" - type : "uint16_t" - default : [0x08,0x00] - choice : - fields : - - key : 0x0800 - val : "ip" - - - key : 0x0860 - val : "ipv6" - - - key : 0x0810 # just an example don't realy remember the numbers - val : "vlan" - - - key : 0x0812 - val : "mpls" - default : "payload" ----- - - -==== Union base - -TBD - - - -=== Resource -* link:https://wireedit.com/[WireEdit] -* link:http://ostinato.org/[ostinato] -* link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] - +Packet Builder Language +======================= +:author: hhaim +:email: +:revnumber: 0.03 +:quotes.++: +:numbered: + +== change log + +[options="header",cols="^1,^h,a"] +|================= +| Version | name | meaning +| 0.01 | hhaim | +- first version +| 0.02 | hhaim +| +- change the bool fields to properties +- add external/internal property +- add const property ( instead cant_change) +- change TLV property - now learn the prev header +- add choice of next protocol that is not base on a field ( 
TCP->IP->TCP) +| 0.03 | ybrustin +| +- add MAC address regexp +- add gui_representation class with data_type, form_type, combobox_values, data_type_regexp items to describe GUI view of field +- rename choice attribute to value_based_next_header +- fixed some typos + +|================= + + +== A file format for GUI packet builder + +=== Introduction + +We would like a file that will be read by GUI and will give us the ability to build packets using GUI + +The format should be *YAML* + + +=== High Level Requirement + +* Define a YAML object format for dynamic building of packets and a program that change various fields +* Ability to *parse* back the same buffer that was created using this tool (reversibility) +** Ability to load packet from a pcap file and parse it +* Ability to save the packet to a pcap file +* Ability to save the packet and program in JSON format (same JSON-RPC format) +* Set a value for any field of any protocol +* Vary packet fields across packets at run time e.g. changing IP/MAC addresses +* Stack protocols in any arbitrary order define in YAML format + +=== Header that should be supported (first phase) + +==== L2 + +* Ethernet +* 802.3 +* LLC SNAP +*VLAN (with QinQ) stack +*MPLS stack + +==== L3 + +* ARP +* IPv4 +* IPv6 (4x header) +* IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) + +==== L4 + +* TCP +* UDP +* ICMPv4 +* ICMPv6 +* IGMP + +==== L7 anchor:Payload[] + +* Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) 
+** random string +** repeat string + +* Pattern Binary +** repeat of value (e.g 0x55) +** random +** seq (1,2,3,3,4) +** User Hex Dump editor + + +=== YAML Format + +==== Header section + +.Default Types anchor:Types[] +[options="header",cols="1,2,3"] +|================= +| Field Name | meaning | size in bits +| bit | describe the header object e.g tcp | 1 +| uint8 | describe the header object e.g tcp | 8 +| uint16 | the name in the GUI | 16 +| uint32 | sub fields of this header | 32 +| uint64 | sub fields of this header | 64 +| other class type | name of other class. for example, "c-mac-addr"; take fields from there, optionally overload them later | The size taken from that class +| Payload | xref:Payload[Payload] | total packet size - all header until now +| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object +|================= + + +.Default Data_Type anchor:Data_Type[] +[options="header",cols="1,2"] +|================= +| Field Name | meaning +| none | use Hex Editor as Types +| ipv4_t | 4 decimals 0-255 each +| mac_addr_t | ([0-9a-fA-F]\{2\}:)\{5\}[0-9a-fA-F]\{2\} +| ipv4_mask_t | should match uint32_t type +| ipv6_t | should have 16 bytes field size 8x16 +| ipv6_mask_t | should have 16 bytes field size 8x16 +| another header class | sub fields of this header +| char_t | array of bytes , look into the array_size of cost string +| var_char_t | array based on a field value look into +| regexp_t | define a Java function that converts a reg exp string to a buffer see here xref:GenRegExp[RegExp] +|================= + +.Default Form_Type anchor:Form_Type[] +[options="header",cols="1,3"] +|================= +| Field Name | meaning +| none | simple editing field +| combo_with_edit | combo box with predefined choices, can edit the field value manually +| combo_without_edit | combo box with predefined choices, can [underline]#not# edit the field value manually 
+| checkbox | toggle bits values, if item is array of bits, display several checkboxes per number of bits +|================= + + +.Default Gui_Representation anchor:Gui_Representation[] +[options="header",cols="1,^1,5,^1,10"] +|================= +| Field Name | value type | meaning | Link | Additional info +| data_type | string | how to represent data | xref:Data_Type[Data_Type] | data_type could get data_type_regexp e.g data_type = "ipv4"; data_type = "regexp" data_type_regexp = "string that define regexp and Java function" +| form_type | string | which editing form to use | xref:Form_Type[Form_Type] | for example for ip address use combobox with option to edit value manually or choose: key "localhost" value "127.0.0.1" etc. +| combobox_values | array | pairs of 'key - value' for combo_with/without_edit | | +| data_type_regexp | string | in case it is reg_exp the name of the function | xref:GenRegExp[GenRegExp] | +|================= + + +.Default Properties anchor:Properties[] +[options="header",cols="1,7"] +|================= +| Field Name | meaning +| ipv4_checksum | auto calculates checksum on this header Ipv4 type +| tcp_udp_checsum | calculate next TCP checksum +| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet +| tlv | TLV length of the header (inlcudes the prev field length) example ip-option, tcp-option +| le | little endian. deault is big +| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field +| external | marks the header as an external header for the GUI. 
for example IPv4 is external header and mac-addr is internal header ( compose external header) +|================= + + +.Field_Type anchor:Field_Type[] +[options="header",cols="1,^1,30,^1,^1,30"] +|================= +| Field Name | value type | meaning | Default Value | Link | Example +| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp +| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | name : tcp +| help | string | the name in the GUI | no | | class TCP, Layer 4 +| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr +| type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" +| gui_representation | object | description of how to view/edit data in GUI | | xref:Gui_Representation[Gui_Representation] | xref:Gui_Representation_Example[Gui_Representation_Example] +| default | array/value | default value in the packets , you can override value for subfields in parent see example +| [0 ]x header size | | xref:Overide_Subfields_Example[Overide_Subfields_Example] +| properies | array of string like masks +| properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] +| value_based_next_header | array | define the next protocol based on a field value | none | xref:Value_Based_Next_Header[Value_Based_Next_Header] | +| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Next_headers[Next_headers] | +| fields | array | array of Field_Type | [] | | fields : [ ] +| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | +| option | string | a java code that define a way to calculate varible size | "none" | | | +|================= + + +.Field_Type anchor:ConstHeadesClass[] 
+[options="header",cols="^1,^10"] +|================= +| Field Name | value type +| "root" | the root pointer to the start of blocks L2/802.3 etc +| "end" | end TLV headers +| "payload" | the rest of the packets as buffer/string etc +|================= + + +.Next_headers anchor:Next_headers[] +Example of Next_headers +[source,python] +---- + + - class : "next-example-t-1" + help : "next-example-t-1" + next_headers : ["ip","ipv6,"tcp"] + +# option 1 define in the header itself + - class : "tcp" + help : "TCP header" + properies : ["external"] + next_headers : ["ip","ipv6,"tcp"] + fields : + - name : "ver" + +# option 2 define throw a class + - class : "tcp" + help : "TCP header" + properies : ["external"] + next_headers : "next-example-t-1" # + fields : + - name : "ver" +---- + + +.Value_Based_Next_Header anchor:Value_Based_Next_Header[] +Example of value_based_next_header +[source,python] +---- + fields : + - key : 0x0800 + val : "ip" # name of an external or internal class , the GUI should distinct betwean internal and external + + - key : 0x86DD + val : "ipv6" + + - key : 0x8100 + val : "vlan" + + - key : 0x8847 + val : "mpls" # unicast + default : "payload" # if no match for any of above +---- + + +.Generic RegExp Edit Field anchor:GenRegExp[] + +This will define a regexp that match for user input and how to converts it to buffer of bytes + +[source,python] +---- + +class MyClass : public RegExpBase { + public: + + + string get_reg_exp_string( ) { + return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) + } + + # in case of match + buffer get_buffer(){ + g= [get_group()[1].to_int()*256,get_group()[1].to_int()] + # return list + return (g) + } + +} + +---- + + + +==== Relations between object headers + +There would be a root object to point to possible starting headers + + +[source,python] +---- + +- class : "root" + help : "Root" + next_headers : [ "ethrenet" ,"llc","_802-3"] +---- + +So in a way you could define a tree like this + +[source,python] +---- 
+ +root -> L2 ( Ethernet , 802.3 , LLC SNAP ) + |( by field ) + | + ------------------------------------- ( VLAN (with QinQ), MPLS , ipv4, ipv6, ARP , ICMP ) + | | | | + | ipv4/ipv6 - - + | | + | | + [Possibility - Ethernet/802.3/LLC SNAP) | UDP/TCP/Pyload + Object | | + for each option there tree of all the option --- - +---- + + +==== Rules + +* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) +* It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI +* The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size +* "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) +* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types + for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. +* Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
+* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) + + +=== Examples + + +==== TLV (Ip option) anchor:IpvOption[] + + +IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] + +0 : END + +1 : Length 1 + +other : Byte : Length ( +first) |option + + + +[source,python] +---- + + - class : "ip_option_131" + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + help : "length" + type : uint8 + properties : ["tlv"] # the length include the prev field size (8 byte) + + - name : "pointer" # tree with leaf of bits + type : uint8 + + - name : "buffer" # tree with leaf of bits + type : "tlv_reset" + + - class : "default_ip4_option_tlv" + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + help : "length" + type : uint8 + properties : "tlv" # the length include the prev field size (8 byte) + + - name : "buffer" # tree with leaf of bits + type : "vlen_t" + + + - class : "ip_option" + help : "ip_option" + type : uint8 + default : [0x01] + value_based_next_header : + fields: + - key : 0x00 + val : "end" # reserve name for ending the loop + + - key : 0x01 + val : "ip_option" # back to this header + + - key : 0x131 + val : "ip_option_131" + + - key : 0x0812 + val : "gre" + + default : "default_ip4_option_tlv" + + +---- + +* case of varible length field ip_option example + + + +==== Example TCP/IP + + +[source,python] +---- + + - class : "c-mac-addr" + help : "Mac addrees" + type : "uint8" + array_size : 6 + default : [0x00, 0x00, 0x01, 0x00, 0x00, 0x00] + gui_representation: + data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + + + - class : "ethernet" + help : "Ethernet-L2" + properties: ['external'] + fields : + - name : "Dst" + help : "destination mac" + type : "c-mac-addr" + + - name : "Src" + help : "source mac" + type : "c-mac-addr" + + - name: "Ethertype" + help: "Ethertype" + type: "uint16" + default: [0x0800] + 
value_based_next_header : + fields : + - key : 0x0800 + val : "ip" + + - key : 0x86DD + val : "ipv6" + + - key : 0x8100 + val : "vlan" + + - key : 0x8847 + val : "mpls" #unicast + default : "payload" + + + - class : "ipv4" + help : "Ipv4" + fields : + - name : "ver" + help : "Version" + type : "bit" + array_size : 4 + default : [0, 1, 0, 0] + properties : ["const"] + + - name : "ihl" + help : "IHL" + type : "bit" + array_size : 4 + default : [0, 1, 1, 1] + properties : ["ipv4_ihl"] + gui_representation: + form_type: "checkbox" + + .. + + - name : "hdr_chsum" + help : "Header Checksum" + default : [0x00,0x00] + properties : ["ipv4_check_sum", "const"] + + - name : "total_len" + help : "Total Length" + default : [0x00,0x00] + properties : ["ipv4_total_len", "const"] # auto calculate total_size-offset_header + + - name : "protocol" + help : "Protocol" + type : uint8 + default : 0x06 + value_based_next_header : &ipv4_next_header + fields: + - key : 0x06 + val : "tcp" + + - key : 0x11 + val : "udp" + + - key : 0x29 + val : "ipv6" + + - key : 0x2F + val : "gre" + default : "payload" + gui_representation: + form_type: "combo_without_edit" + combobox_values: + <<: *ipv4_next_header # take same choices as value_based_next_header + + - name : "src_addr" + help : "Source Address" + type : uint32 + default : [16, 0, 0, 0] + gui_representation: + data_type : "ipv4" # reserve + + - name : "dst_addr" + help : "Destination Address" + default : [48, 0, 0, 0] + type : uint32 + gui_representation: + data_type : "ipv4" # reserve + form_type : "combo_with_edit" + combobox_values: + fields: + - key : [127, 0, 0, 1] + - value : "localhost" + + - key : [255, 255, 255, 255] + - value : "broadcast" + + + - class : "tcp" + help : "TCP" + properties : ["external"] + fields : + - name : "src_port" + help : "Source Port" + default : [0x30,0x00] + type : uint16 + + - name : "dest_port" + help : "Source Port" + default : [0x30,0x00] + type : uint16 + + - name : "seq" + help : "Seq Number" + type 
: uint32 + default : [0x30,0x00,00,00] + + - name : "ack" + help : "Ack Number" + type : uint32 + default : [0x30,0x00,00,00] + + ... + + - name : "flags" # tree with leaf of bits + help : "Ack Number" + type : uint8 + default : [0x30] + fields : + - name : "urg" + help : "URG" + type : bit + default : [0x0] + + - name : "ack" + help : "ACK" + type : bit + default : [0x1] + .. + + - name : "checksum" + help : "Checksum" + type : uint16 + default : [0x00,0x00] + properties : ["tcp_checksum"] # auto calculate total_size-offset_header + + +- class : "root" # reserve + help : "Root" + next_headers : [ "ethrenet" ,"llc","_802-3"] +--------------------------- + + +==== Overide subfields example anchor:Overide_Subfields_Example[] + +In this example parent class default value overrides default values of sub-fields ( 2 different mac-addr) + +[source,python] +---- + + - class : "c-mac-addr" + help : "Mac addrees" + type : "uint8" + array_size : 6 + gui_representation: + data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + default : [0x00,0x00,0x01,0x00,0x00,0x00] + + + - class : "ethernet" + help : "Ethernet-L2" + properties : ["external"] + default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . 
it is const size + fields : + - name : "Dst" + help : "destination mac" + type : "c-mac-addr" + + - name : "Src" + help : "source mac" + type : "c-mac-addr" + + - name : "ip_protocol" + type : "uint16_t" + default : [0x08,0x00] + value_based_next_header : + fields : + - key : 0x0800 + val : "ip" + + - key : 0x86DD + val : "ipv6" + + - key : 0x8100 + val : "vlan" + + - key : 0x8847 + val : "mpls unicast" + default : "payload" +---- + +==== Gui Representation example anchor:Gui_Representation_Example[] +[underline]#In YAML:# +[source,python] +---- + - name: 'Flags' + help: 'IPv4 Flags' + type: 'bit' + array_size: 3 + gui-representation: + form_type: 'checkbox' # can check each bit + + + - name: 'dst_addr' + help: 'IPv4 Destination Address' + default: [48, 0, 0, 0] + type: uint32 + gui-representation: + data_type: 'ipv4_t' # special representation case, show as 4 decimal numbers + form_type: 'combo_with_edit' # can choose from pre-defined values or edit manually + combobox_values: + fields: + - key: [127, 0, 0, 1] + - val: 'localhost' + + - key: [255, 255, 255, 255] + - val: 'broadcast' + + + - name: 'protocol' + help: 'IPv4 Protocol Field' + type: uint8 + default: 0x06 + value_based_next_header: &ipv4_next_header + fields: + - key: 0x06 + val: 'tcp' + + - key: 0x11 + val: 'udp' + default : "payload" + gui-representation: + form_type: 'combo_without_edit' # choose from supported protocols, no manual edit + combobox_values: + <<: *ipv4_next_header # take same choices as value_based_next_header +---- + +[underline]#In GUI:# + +checkbox for bits: + +image:images/checkbox.jpg[] + +editing in combo-box: + +image:images/combo_button_editing.jpg[] + +choosing from predefined values: + +image:images/combo_button_choosing.jpg[] + +==== Union base + +TBD + + + +=== Resource +* link:https://wireedit.com/[WireEdit] +* link:http://ostinato.org/[ostinato] +* link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] + -- cgit 1.2.3-korg From 
bd16d1ad4537248ae1e9e65df5b873cf7751555e Mon Sep 17 00:00:00 2001 From: imarom Date: Thu, 10 Sep 2015 01:45:57 -0400 Subject: added stats section --- trex_rpc_server_spec.asciidoc | 115 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 6f3e21e9..f199529e 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -876,4 +876,119 @@ if both are enabled then 10 bytes will be used. ---- +=== Get Global Stats +* *Name* - 'get_global_stats' +* *Valid States* - 'owned', 'active' +* *Description* - Get machine global stats +* *Paramters* - None + +* *Result* ['object'] - See Below + +.Return value of 'get_global_stats' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| state | string | server state: can be 'unowned', 'owned' or 'active' +| cpu_util | double | DP CPU util. in % +| tx_bps | double | total TX bits per second +| rx_bps | double | total RX bits per second +| tx_pps | double | total TX packets per second +| rx_pps | double | total RX packets per second +| tx_cps | double | total TX connection per second +| tx_expected_bps | double | expected TX bits per second +| tx_expected_pps | double | expected TX packets per second +| tx_expected_cps | double | expected TX connections per second +| rx_drop_bps | double | drop rate in bits per second +| total_tx_pkts | int | total TX packets +| total_rx_pkts | int | total RX packets +| total_rx_bytes | int | total TX bytes +| total_tx_bytes | int | total RX bytes +|================= + +=== Get Port Stats +* *Name* - 'get_port_stats' +* *Valid States* - 'owned', 'active' +* *Description* - Get port stats +* *Paramters* +** *port_id* [int] - The port id for query + +* *Result* ['object'] - See Below + +.Return value of 'get_port_stats' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| active | boolean | 'true' if the port is in 
transmitting phase +| opackets | int | total output packets +| ipackets | int | total input packets +| obytes | int | total output bytes +| ibytes | int | total input bytes +| oerrors | int | total output errors +| ierrors | int | total input errors +|================= + +=== Get Stream Stats +* *Name* - 'get_steram_stats' +* *Valid States* - 'owned', 'active' +* *Description* - Get port stats +* *Paramters* +** *port_id* [int] - The port id for query +** *stream_id* [int] - The stream id for query + +* *Result* ['object'] - See Below + +.Return value of 'get_stream_stats' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| tx_pkts | int | total TX packets +| rx_pkts | int | total RX packets [only if 'rx_stats' is enabled], o.w always zero +|================= + + +== Typical Transactions Examples +the following examples represents common scenarios. +commands in [...] represents 'meta commands' +and not real RPC commands such as 'repeat', 'wait' and etc. + +=== Init/Boot +This sequence represents a client implementing the protocol taking ownership +over the server and preparing to perform work + +==== Commands Flow +* *ping* - Ping the server to verify the server is up +* *get_owner* - if owner is not me or 'none' prompt to the user if he wants to force it +* *acquire* - Ask or force for exclusive control over the server. 
save the 'handler' given for future commands +* *get_version* - Verify the server is compatible with the GUI +* *get_system_info* - Get the installed ports and cores +* *get_stream_list* - for every port, get the list and sync the GUI +* *get_stream* - for every stream in a port list, get the stream info and sync the GUI + +=== Simple Traffic With Adding / Editing Streams + +describes a simple scenario where a user wants to +add or edit one or more streams to one or more ports + +==== Commands Flow +* *[init]* - perform the init procedure from above +* *[GUI add/edit streams]* - GUI provides the user a way to add or edit streams and sync them +* *remove_all_streams* ['optional'] - remove all previous streams to start from scratch +* *add_stream* - configure a specific port with a stream. +* *['repeat previous']* - 'repeat' the above for how many ports and streams desired +* *get_stream_list* ['optional'] - sanity - verify the server is synced with the GUI +* *start_traffic* - start traffic on the specific port / all the ports +* *get_global_stats* ['optional'] - make sure the machine is transmiting traffic +* *['wait']* - 'wait' for the time of the test +* *stop_traffic* - when done, stop the traffic on the specific port / all the ports +* *get_global_stats* ['optional'] - make sure the machine has stopped + +=== Logout + +Describes the log off from the machine + +==== Commands Flow +* *stop_traffic* ['optional'] - if traffic has started - stop it +* *get_global_stats* ['optional'] - make sure the machine has stopped +* *remove_all_streams* ['optional'] - if you want to clear all the previous streams - use this +* *release* - release the ownership over the device -- cgit 1.2.3-korg From 44caae485f05b97af3de70ac267d72a444cd3866 Mon Sep 17 00:00:00 2001 From: imarom Date: Wed, 16 Sep 2015 08:04:26 +0300 Subject: minor fixes to the doc --- images/rpc_states.png | Bin 24137 -> 58390 bytes trex_rpc_server_spec.asciidoc | 113 ++++++++++++++++++++++++++++++++++-------- 2 
files changed, 92 insertions(+), 21 deletions(-) diff --git a/images/rpc_states.png b/images/rpc_states.png index 57c0ac17..cdbf1c51 100644 Binary files a/images/rpc_states.png and b/images/rpc_states.png differ diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index f199529e..728781bc 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -209,9 +209,9 @@ For commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST along with the rest of the parameters. -This will identify the connection. +This will identify the connection: -image::images/rpc_states.png[title="RPC Server States",align="left",width=200, link="images/rpc_states.png"] +image::images/rpc_states.png[title="RPC Server States",align="left",width=150, link="images/rpc_states.png"] == RPC Commands The following RPC commands are supported @@ -246,10 +246,10 @@ Example: ---- -=== Get Registered Commands -* *Name* - 'get_reg_cmds' +=== Get Server Supported Commands +* *Name* - 'get_supported_cmds' * *Valid States* - 'all' -* *Description* - Queries the server for all the registered commands +* *Description* - Queries the server for all the supported commands * *Paramters* - None * *Result* ['array'] - A list of all the supported commands by the server @@ -262,7 +262,7 @@ Example: { "jsonrpc": "2.0", "id": 1, - "method": "get_reg_cmds", + "method": "get_supported_cmds", "params": null } @@ -304,6 +304,34 @@ Example: | built_by | string | who built this version |================= +[source,bash] +---- + +'Request': + +{ + "id": "wapkk8m6", + "jsonrpc": "2.0", + "method": "get_version", + "params": null +} + + +'Response': + +{ + "id": "wapkk8m6", + "jsonrpc": "2.0", + "result": { + "build_date": "Sep 16 2015", + "build_time": "12:33:01", + "built_by": "imarom", + "version": "v0.0" + } +} + +---- + === Get System Info * *Name* - 'get_system_info' * *Description* - Queries the server for system properties @@ -317,7 +345,6 @@ Example: | 
dp_core_count | int | DP core count | core_type | string | DP core type | hostname | string | machine host name -| ip | string | machine IP | uptime | string | uptime of the server | port_count | int | number of ports on the machine | ports | array | arary of object ''port'' - see below @@ -329,10 +356,63 @@ Example: | Field | Type | Description | driver | string | driver type | speed | string | speed of the port (1g, 10g, 40g, 100g) -| status | string | up / down +| status | string | 'down', 'idle' or 'transmitting' |================= +[source,bash] +---- + +'Request': + +{ + "id": "zweuldlh", + "jsonrpc": "2.0", + "method": "get_system_info", + "params": null +} + +'Response': + +{ + "id": "zweuldlh", + "jsonrpc": "2.0", + "result": { + "core_type": "Intel(R) Xeon(R) CPU E5-2650 0 @ 2.00GHz", + "dp_core_count": 1, + "hostname": "csi-kiwi-03.cisco.com", + "port_count": 4, + "ports": [ + { + "driver": "E1000", + "index": 0, + "speed": "1g", + "status": "down" + }, + { + "driver": "E1000", + "index": 1, + "speed": "1g", + "status": "down" + }, + { + "driver": "E1000", + "index": 2, + "speed": "1g", + "status": "down" + }, + { + "driver": "E1000", + "index": 3, + "speed": "1g", + "status": "down" + } + ] + } +} + +---- + === Get Owner * *Name* - 'get_owner' * *Valid States* - 'all' @@ -548,11 +628,8 @@ Describes rx stats for the stream {zwsp} + -'IMPORTANT': - -In case rx_stats is enabled, meta data will be written in the end of the packet. - -please consider the following: +IMPORTANT: In case rx_stats is enabled, meta data will be written in the end of the packet. +please also consider the following constraints: ==== Constrains * *performance* - this will have performance impact as rx packets will be examined @@ -894,11 +971,6 @@ if both are enabled then 10 bytes will be used. 
| rx_bps | double | total RX bits per second | tx_pps | double | total TX packets per second | rx_pps | double | total RX packets per second -| tx_cps | double | total TX connection per second -| tx_expected_bps | double | expected TX bits per second -| tx_expected_pps | double | expected TX packets per second -| tx_expected_cps | double | expected TX connections per second -| rx_drop_bps | double | drop rate in bits per second | total_tx_pkts | int | total TX packets | total_rx_pkts | int | total RX packets | total_rx_bytes | int | total TX bytes @@ -923,8 +995,7 @@ if both are enabled then 10 bytes will be used. | ipackets | int | total input packets | obytes | int | total output bytes | ibytes | int | total input bytes -| oerrors | int | total output errors -| ierrors | int | total input errors +| tx-rx-error | int | total Tx/Rx errors |================= === Get Stream Stats @@ -978,7 +1049,7 @@ add or edit one or more streams to one or more ports * *get_stream_list* ['optional'] - sanity - verify the server is synced with the GUI * *start_traffic* - start traffic on the specific port / all the ports * *get_global_stats* ['optional'] - make sure the machine is transmiting traffic -* *['wait']* - 'wait' for the time of the test +* *['perfrom test']* - perform the required test * *stop_traffic* - when done, stop the traffic on the specific port / all the ports * *get_global_stats* ['optional'] - make sure the machine has stopped -- cgit 1.2.3-korg From e9960e1510b94b946c1b790db7d050c5641d1c83 Mon Sep 17 00:00:00 2001 From: imarom Date: Thu, 17 Sep 2015 08:52:53 +0300 Subject: fixed doc to match owning model per port --- trex_rpc_server_spec.asciidoc | 79 +++++++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 29 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 728781bc..c08447dd 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -193,32 +193,32 @@ The following 
diagram illustres the RPC server component's place: image::images/rpc_server_big_picture.png[title="RPC Server Position",align="left",width=800, link="images/rpc_server_big_picture.png"] -== RPC Server State Machine -The RPC server can be in numbered of states, each state provides other subset of the commands +== RPC Server Port State Machine +Any port on the server can be in numbered of states, each state provides other subset of the commands that are allowed to be executed. We define the following possible states: -* *unowned* - The server is either unowned or another user is owning the device -* *owned* - The server has been acquired by the client -* *active* - The server is in the middle of injecting traffic - currently active +* *unowned* - The specific port is either unowned or another user is owning the port +* *owned* - The specific port has been acquired by the client +* *active* - The specific port is in the middle of injecting traffic - currently active -Each command will specify on which states it is possible to execute it. +Each port command will specify on which states it is possible to execute it. -For commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST' be passed +For port related commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST' be passed along with the rest of the parameters. 
This will identify the connection: -image::images/rpc_states.png[title="RPC Server States",align="left",width=150, link="images/rpc_states.png"] +image::images/rpc_states.png[title="Port States",align="left",width=150, link="images/rpc_states.png"] == RPC Commands The following RPC commands are supported === Ping * *Name* - 'ping' -* *Valid States* - 'all' +* *Valid States* - 'not relevant' * *Description* - Pings the TRex server * *Paramters* - None * *Result* ['string'] - "ACK" On Sucess @@ -248,7 +248,7 @@ Example: === Get Server Supported Commands * *Name* - 'get_supported_cmds' -* *Valid States* - 'all' +* *Valid States* - 'not relevant' * *Description* - Queries the server for all the supported commands * *Paramters* - None * *Result* ['array'] - A list of all the supported commands by the server @@ -289,7 +289,7 @@ Example: === Get Version * *Name* - 'get_version' -* *Valid States* - 'all' +* *Valid States* - 'not relevant' * *Description* - Queries the server for version information * *Paramters* - None * *Result* ['object'] - See table below @@ -416,8 +416,9 @@ Example: === Get Owner * *Name* - 'get_owner' * *Valid States* - 'all' -* *Description* - Queries the server for current owner -* *Paramters* - None +* *Description* - Queries the server for a specific port current owner +* *Paramters* - +** *port_id* ['int'] - port id to query for owner * *Result* ['string'] - owner name if exists, otherwise 'none' [source,bash] @@ -429,7 +430,9 @@ Example: "id": "hxjkuwj9", "jsonrpc": "2.0", "method": "get_owner", - "params": null + "params": { + "port_id": 1 + } } 'Response': @@ -447,11 +450,12 @@ Example: === Acquire * *Name* - 'Acquire' * *Valid States* - 'all' -* *Description* - Takes ownership on the device. 
+* *Description* - Takes ownership over the port * *Paramters* - +** *port_id* ['int'] - port id to take ownership ** *user* ['string'] - User name aquiring the system -** *force* ['boolean'] - force action even if another user is holding the device -* *Result* ['string'] - 'unique' connection handler for future requests +** *force* ['boolean'] - force action even if another user is holding the port +* *Result* ['string'] - 'unique' connection handler for future requests for that port [source,bash] ---- @@ -463,8 +467,9 @@ Example: "jsonrpc": "2.0", "method": "Acquire", "params": { + "user": "itay" + "port_id": 1 "force": false, - "user": "itay" } } @@ -485,6 +490,7 @@ Example: * *Description* - Release owernship over the device * *Paramters* - ** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port id to release * *Result* ['string'] - "ACK" on success [source,bash] @@ -498,6 +504,7 @@ Example: "method": "release", "params": { "handler": "37JncCHr" + "port_id": 1 } } @@ -622,6 +629,8 @@ Any element in the array can be one of the following object types: | is_big_endian | boolean | should write as big endian or little |================= +TIP: For more information and examples on VM objects please refer to: +link:vm_doc.html[VM examples] ===== Object type 'rx_stats' anchor:rx_stats_obj[] Describes rx stats for the stream @@ -888,7 +897,7 @@ if both are enabled then 10 bytes will be used. * *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned * *Paramters* ** *handler* ['string'] - unique connection handler -** *port_id* ['array'] - array of port id on which to start traffic +** *port_id* ['int'] - port id on which to start traffic * *Result* ['string'] - "ACK" on success @@ -903,7 +912,7 @@ if both are enabled then 10 bytes will be used. 
"method": "start_traffic", "params": { "handler": "37JncCHr", - "port_id": [3, 4] + "port_id": 3 } 'Response': @@ -923,7 +932,7 @@ if both are enabled then 10 bytes will be used. * *Description* - Stops the traffic on a specific port. if the port has already started nothing will happen * *Paramters* ** *handler* ['string'] - unique connection handler -** *port_id* ['array'] - array of port id on which to stop traffic +** *port_id* ['int'] - port id on which to stop traffic * *Result* ['string'] - "ACK" on success @@ -938,7 +947,7 @@ if both are enabled then 10 bytes will be used. "method": "stop_traffic", "params": { "handler": "37JncCHr", - "port_id": [3, 4] + "port_id": 3 } } @@ -975,6 +984,7 @@ if both are enabled then 10 bytes will be used. | total_rx_pkts | int | total RX packets | total_rx_bytes | int | total TX bytes | total_tx_bytes | int | total RX bytes +| tx-rx-error | int | total Tx/Rx errors |================= === Get Port Stats @@ -990,11 +1000,15 @@ if both are enabled then 10 bytes will be used. [options="header",cols="1,1,3"] |================= | Field | Type | Description -| active | boolean | 'true' if the port is in transmitting phase -| opackets | int | total output packets -| ipackets | int | total input packets -| obytes | int | total output bytes -| ibytes | int | total input bytes +| status | string | 'down', 'idle' or 'transmitting' +| tx_bps | double | total TX bits per second +| rx_bps | double | total RX bits per second +| tx_pps | double | total TX packets per second +| rx_pps | double | total RX packets per second +| total_tx_pkts | int | total TX packets +| total_rx_pkts | int | total RX packets +| total_rx_bytes | int | total TX bytes +| total_tx_bytes | int | total RX bytes | tx-rx-error | int | total Tx/Rx errors |================= @@ -1012,8 +1026,15 @@ if both are enabled then 10 bytes will be used. 
[options="header",cols="1,1,3"] |================= | Field | Type | Description -| tx_pkts | int | total TX packets -| rx_pkts | int | total RX packets [only if 'rx_stats' is enabled], o.w always zero +| tx_bps | double | total TX bits per second +| tx_pps | double | total TX packets per second +| total_tx_pkts | int | total TX packets +| total_tx_bytes | int | total TX bytes +| rx_bps | double | total RX bits per second (if 'rx_stats' enabled) +| rx_pps | double | total RX packets per second (if 'rx_stats' enabled) +| total_rx_pkts | int | total RX packets (if 'rx_stats' enabled) +| total_rx_bytes | int | total RX bytes (if 'rx_stats' enabled) +| latency | array | array of 2 ordered elements average, maximum (if 'rx_stats' enabled) |================= -- cgit 1.2.3-korg From cc119ddc3a3fa7e3a3e42bbd49a639598c9c4531 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Sep 2015 15:09:43 +0300 Subject: v1.76 --- release_notes.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 1bc0ba88..f5f01d0a 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,14 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.76 == + +=== fix issues: === + +* minor pcap loader issues +* plugin cleanup + + == Release 1.75 == === fix issues: === -- cgit 1.2.3-korg From 2fd99b86993ba5a78e3aae5bbf58ad392dc95175 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 20 Sep 2015 17:50:39 +0300 Subject: add tuple generator support --- trex_book.asciidoc | 2 +- trex_book_basic.asciidoc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 3d5bd449..8f3e4ac5 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -2,7 +2,7 @@ TRex ==== :author: hhaim :email: -:revnumber: 1.70-0.0 +:revnumber: 1.77-0.0 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex diff --git a/trex_book_basic.asciidoc 
b/trex_book_basic.asciidoc index d64cea43..ec0c9cf5 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -3190,7 +3190,7 @@ So if the m is set as 1, the total PPS is : 102*2+50*20 = 1204 PPS. The BPS depends on the packet size. You can refer to your packet size and get the BPS = PPS*Packet_size. -==== Roadmap of Client/Server IP allocation +==== Client/Server IP allocation We have several features under development for IP allocation. @@ -3247,7 +3247,7 @@ The YAML configuration is something like this: - *2) More distributions will be supported (normal distribution, random distribution, etc)* -Currently, only sequcence is supported. +Currently, only sequcence and random are supported. - *3) Histogram of tuple pool will be supported* -- cgit 1.2.3-korg From 6347fdcd8dca0824ac8baa217f59d410a75e085b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 20 Sep 2015 18:03:09 +0300 Subject: v1.77 --- release_notes.asciidoc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index f5f01d0a..b4496213 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,11 @@ ifdef::backend-docbook[] endif::backend-docbook[] + +== Release 1.77 == + +* improve tuple generator capability now it is more flexiable see more link:trex_manual.html#_clients_servers_ip_allocation_scheme[here] + == Release 1.76 == === fix issues: === @@ -34,10 +39,6 @@ endif::backend-docbook[] * First version that works from GitHub/Git - init script are in the output package -== Release 1.75 == - -* This version does not work, no init script - == Release 1.72 == -- cgit 1.2.3-korg From 33935c6fde9d875d72d8213e12e86fa64927044d Mon Sep 17 00:00:00 2001 From: imarom Date: Mon, 21 Sep 2015 13:09:08 +0300 Subject: few little fixes --- trex_rpc_server_spec.asciidoc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc 
b/trex_rpc_server_spec.asciidoc index c08447dd..94a95bfb 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -539,15 +539,15 @@ The format of that object is as follows: .Object type 'stream' [options="header",cols="1,1,3"] |================= -| Field | Type | Description -| enabled | boolean | is this stream enabled -| self_start | boolean | is this stream triggered by starting injection or triggered by another stream -| isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started -| next_stream | int | next stream to start after this stream. -1 means stop after this stream -| packet | object | object of type xref:packet_obj['packet'] -| mode | object | object of type xref:mode_obj['mode'] -| vm | array | array of objects of type xref:vm_obj['vm'] -| rx_stats | object | object of type xref:rx_stats_obj['rx_stats'] +| Field | Type | Description +| enabled | boolean | is this stream enabled +| self_start | boolean | is this stream triggered by starting injection or triggered by another stream +| isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started +| next_stream_id | int | next stream to start after this stream. -1 means stop after this stream +| packet | object | object of type xref:packet_obj['packet'] +| mode | object | object of type xref:mode_obj['mode'] +| vm | array | array of objects of type xref:vm_obj['vm'] +| rx_stats | object | object of type xref:rx_stats_obj['rx_stats'] |================= ===== Object type 'packet' anchor:packet_obj[] @@ -982,9 +982,9 @@ if both are enabled then 10 bytes will be used. 
| rx_pps | double | total RX packets per second | total_tx_pkts | int | total TX packets | total_rx_pkts | int | total RX packets -| total_rx_bytes | int | total TX bytes -| total_tx_bytes | int | total RX bytes -| tx-rx-error | int | total Tx/Rx errors +| total_tx_bytes | int | total TX bytes +| total_rx_bytes | int | total RX bytes +| tx_rx_error | int | total Tx/Rx errors |================= === Get Port Stats @@ -1009,7 +1009,7 @@ if both are enabled then 10 bytes will be used. | total_rx_pkts | int | total RX packets | total_rx_bytes | int | total TX bytes | total_tx_bytes | int | total RX bytes -| tx-rx-error | int | total Tx/Rx errors +| tx_rx_error | int | total Tx/Rx errors |================= === Get Stream Stats -- cgit 1.2.3-korg From 8de7a0225f9c4487be5e8ddb9a34d4c2eff6db8e Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 28 Sep 2015 14:00:01 +0300 Subject: packet builder docs v0.04 --- packet_builder_yaml.asciidoc | 468 +++++++++++++++++++++---------------------- wscript | 6 + yaml/headers.yaml | 429 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 663 insertions(+), 240 deletions(-) create mode 100755 yaml/headers.yaml diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index aefbe1c7..5d824a58 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -1,8 +1,8 @@ Packet Builder Language ======================= :author: hhaim -:email: -:revnumber: 0.03 +:email: +:revnumber: 0.04 :quotes.++: :numbered: @@ -10,13 +10,13 @@ Packet Builder Language [options="header",cols="^1,^h,a"] |================= -| Version | name | meaning +| Version | name | meaning | 0.01 | hhaim | -- first version -| 0.02 | hhaim +- first version +| 0.02 | hhaim | -- change the bool fields to properties -- add external/internal property +- change the bool fields to properties +- add external/internal property - add const property ( instead cant_change) - change TLV property - now learn the prev header - add choice of 
next protocol that is not base on a field ( TCP->IP->TCP) @@ -26,11 +26,17 @@ Packet Builder Language - add gui_representation class with data_type, form_type, combobox_values, data_type_regexp items to describe GUI view of field - rename choice attribute to value_based_next_header - fixed some typos +| 0.04 | ybrustin +| +- change value_based_next_header, combobox_values (to be consistent with value_based_next_header) to dictionary +- added value_based_next_class for options +- move 'help' attribute to gui_representation +- add link to headers.yaml (references at bottom of the page) |================= -== A file format for GUI packet builder +== A file format for GUI packet builder === Introduction @@ -39,16 +45,16 @@ We would like a file that will be read by GUI and will give us the ability to bu The format should be *YAML* -=== High Level Requirement +=== High Level Requirement -* Define a YAML object format for dynamic building of packets and a program that change various fields +* Define a YAML object format for dynamic building of packets and a program that change various fields * Ability to *parse* back the same buffer that was created using this tool (reversibility) -** Ability to load packet from a pcap file and parse it +** Ability to load packet from a pcap file and parse it * Ability to save the packet to a pcap file * Ability to save the packet and program in JSON format (same JSON-RPC format) * Set a value for any field of any protocol * Vary packet fields across packets at run time e.g. 
changing IP/MAC addresses -* Stack protocols in any arbitrary order define in YAML format +* Stack protocols in any arbitrary order define in YAML format === Header that should be supported (first phase) @@ -57,13 +63,13 @@ The format should be *YAML* * Ethernet * 802.3 * LLC SNAP -*VLAN (with QinQ) stack -*MPLS stack +* VLAN (with QinQ) stack +* MPLS stack ==== L3 -* ARP -* IPv4 +* ARP +* IPv4 * IPv6 (4x header) * IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) @@ -71,52 +77,52 @@ The format should be *YAML* * TCP * UDP -* ICMPv4 +* ICMPv4 * ICMPv6 * IGMP ==== L7 anchor:Payload[] * Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) -** random string -** repeat string +** random string +** repeat string -* Pattern Binary +* Pattern Binary ** repeat of value (e.g 0x55) -** random +** random ** seq (1,2,3,3,4) -** User Hex Dump editor +** User Hex Dump editor -=== YAML Format +=== YAML Format ==== Header section .Default Types anchor:Types[] [options="header",cols="1,2,3"] |================= -| Field Name | meaning | size in bits -| bit | describe the header object e.g tcp | 1 -| uint8 | describe the header object e.g tcp | 8 -| uint16 | the name in the GUI | 16 -| uint32 | sub fields of this header | 32 -| uint64 | sub fields of this header | 64 +| Field Name | meaning | size in bits +| bit | describe the header object e.g tcp | 1 +| uint8 | describe the header object e.g tcp | 8 +| uint16 | the name in the GUI | 16 +| uint32 | sub fields of this header | 32 +| uint64 | sub fields of this header | 64 | other class type | name of other class. 
for example, "c-mac-addr"; take fields from there, optionally overload them later | The size taken from that class -| Payload | xref:Payload[Payload] | total packet size - all header until now -| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object +| Payload | xref:Payload[Payload] | total packet size - all header until now +| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object |================= .Default Data_Type anchor:Data_Type[] [options="header",cols="1,2"] |================= -| Field Name | meaning +| Field Name | meaning | none | use Hex Editor as Types | ipv4_t | 4 decimals 0-255 each | mac_addr_t | ([0-9a-fA-F]\{2\}:)\{5\}[0-9a-fA-F]\{2\} -| ipv4_mask_t | should match uint32_t type -| ipv6_t | should have 16 bytes field size 8x16 -| ipv6_mask_t | should have 16 bytes field size 8x16 +| ipv4_mask_t | should match uint32 type +| ipv6_t | should have 16 bytes field size 8x16 +| ipv6_mask_t | should have 16 bytes field size 8x16 | another header class | sub fields of this header | char_t | array of bytes , look into the array_size of cost string | var_char_t | array based on a field value look into @@ -137,24 +143,26 @@ The format should be *YAML* .Default Gui_Representation anchor:Gui_Representation[] [options="header",cols="1,^1,5,^1,10"] |================= -| Field Name | value type | meaning | Link | Additional info -| data_type | string | how to represent data | xref:Data_Type[Data_Type] | data_type could get data_type_regexp e.g data_type = "ipv4"; data_type = "regexp" data_type_regexp = "string that define regexp and Java function" -| form_type | string | which editing form to use | xref:Form_Type[Form_Type] | for example for ip address use combobox with option to edit value manually or choose: key "localhost" value "127.0.0.1" etc. 
-| combobox_values | array | pairs of 'key - value' for combo_with/without_edit | | -| data_type_regexp | string | in case it is reg_exp the name of the function | xref:GenRegExp[GenRegExp] | +| Field Name | value type | meaning | Link | Additional info +| help | string | the name in the GUI | | +| data_type | string | how to represent data | xref:Data_Type[Data_Type] | data_type could get data_type_regexp e.g data_type = "ipv4"; data_type = "regexp" data_type_regexp = "string that define regexp and Java function" +| form_type | string | which editing form to use | xref:Form_Type[Form_Type] | for example for ip address use combobox with option to edit value manually or choose: key "localhost" value "127.0.0.1" etc. +| combobox_values | dictionary | pairs of 'key - value' for combo_with/without_edit | | +| data_type_regexp | string | in case it is reg_exp the name of the function | xref:GenRegExp[GenRegExp] | |================= .Default Properties anchor:Properties[] [options="header",cols="1,7"] |================= -| Field Name | meaning -| ipv4_checksum | auto calculates checksum on this header Ipv4 type -| tcp_udp_checsum | calculate next TCP checksum -| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet +| Field Name | meaning +| ipv4_checksum | auto calculates checksum on this header Ipv4 type +| tcp_checksum | calculate TCP checksum +| udp_checksum | calculate UDP checksum +| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet | tlv | TLV length of the header (inlcudes the prev field length) example ip-option, tcp-option | le | little endian. deault is big -| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field +| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field | external | marks the header as an external header for the GUI. 
for example IPv4 is external header and mac-addr is internal header ( compose external header) |================= @@ -163,57 +171,60 @@ The format should be *YAML* [options="header",cols="1,^1,30,^1,^1,30"] |================= | Field Name | value type | meaning | Default Value | Link | Example -| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp -| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | name : tcp -| help | string | the name in the GUI | no | | class TCP, Layer 4 -| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr +| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp +| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | name : tcp +| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr | type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" -| gui_representation | object | description of how to view/edit data in GUI | | xref:Gui_Representation[Gui_Representation] | xref:Gui_Representation_Example[Gui_Representation_Example] -| default | array/value | default value in the packets , you can override value for subfields in parent see example +| gui_representation | dictionary | description of how to view/edit data in GUI | | xref:Gui_Representation[Gui_Representation] | xref:Gui_Representation_Example[Gui_Representation_Example] +| default | array/value | default value in the packets , you can override value for subfields in parent see example | [0 ]x header size | | xref:Overide_Subfields_Example[Overide_Subfields_Example] -| properies | array of string like masks +| properies | array of string like masks | 
properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] -| value_based_next_header | array | define the next protocol based on a field value | none | xref:Value_Based_Next_Header[Value_Based_Next_Header] | -| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Next_headers[Next_headers] | -| fields | array | array of Field_Type | [] | | fields : [ ] -| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | -| option | string | a java code that define a way to calculate varible size | "none" | | | +| value_based_next_header | dictionary | define the next protocol based on a field value | none | xref:Value_Based_Next_Header[Value_Based_Next_Header] | +| value_based_next_class | dictionary | define the next class based on a field value (useful for options) | none | xref:Value_Based_Next_Class[Value_Based_Next_Class] | +| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Next_headers[Next_headers] | +| fields | array | array of Field_Type | [] | | fields : [ ] +| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | +| option | string | a java code that define a way to calculate varible size | "none" | | | |================= .Field_Type anchor:ConstHeadesClass[] [options="header",cols="^1,^10"] |================= -| Field Name | value type +| Field Name | value type | "root" | the root pointer to the start of blocks L2/802.3 etc | "end" | end TLV headers | "payload" | the rest of the packets as buffer/string etc |================= - + .Next_headers anchor:Next_headers[] Example of Next_headers [source,python] ---- - class : "next-example-t-1" - help : "next-example-t-1" - next_headers : ["ip","ipv6,"tcp"] + gui_representation: + help : "next-example-t-1" + next_headers : ["ipv4", "ipv6, "tcp"] # option 1 define 
in the header itself - class : "tcp" - help : "TCP header" + gui_representation: + help : "TCP header" properies : ["external"] - next_headers : ["ip","ipv6,"tcp"] - fields : + next_headers : ["ipv4", "ipv6, "tcp"] + fields : - name : "ver" -# option 2 define throw a class +# option 2 define throw a class - class : "tcp" - help : "TCP header" + gui_representation: + help : "TCP header" properies : ["external"] - next_headers : "next-example-t-1" # - fields : + next_headers : "next-example-t-1" # + fields : - name : "ver" ---- @@ -222,42 +233,37 @@ Example of Next_headers Example of value_based_next_header [source,python] ---- - fields : - - key : 0x0800 - val : "ip" # name of an external or internal class , the GUI should distinct betwean internal and external - - - key : 0x86DD - val : "ipv6" - - - key : 0x8100 - val : "vlan" - - - key : 0x8847 - val : "mpls" # unicast - default : "payload" # if no match for any of above + value_based_next_header: + 0x0800: 'ipv4'# name of an external or internal class , the GUI should distinct betwean internal and external + 0x0806: 'arp' + 0x86DD: 'ipv6' + 0x8100: 'vlan' + 0x8847: 'mpls unicast' + default: 'payload' # if no match for any of above + ---- .Generic RegExp Edit Field anchor:GenRegExp[] -This will define a regexp that match for user input and how to converts it to buffer of bytes +This will define a regexp that match for user input and how to converts it to buffer of bytes [source,python] ---- class MyClass : public RegExpBase { public: - + string get_reg_exp_string( ) { return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) } - - # in case of match + + # in case of match buffer get_buffer(){ g= [get_group()[1].to_int()*256,get_group()[1].to_int()] - # return list - return (g) + # return list + return (g) } } @@ -275,15 +281,16 @@ There would be a root object to point to possible starting headers ---- - class : "root" - help : "Root" - next_headers : [ "ethrenet" ,"llc","_802-3"] + gui_representation: + help : 
"Root" + next_headers : [ "ethernet", "llc", "_802-3"] ---- So in a way you could define a tree like this - + [source,python] ---- - + root -> L2 ( Ethernet , 802.3 , LLC SNAP ) |( by field ) | @@ -298,22 +305,22 @@ root -> L2 ( Ethernet , 802.3 , LLC SNAP ) ---- -==== Rules +==== Rules -* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) +* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) * It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI * The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size * "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) -* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types - for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. +* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types + for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. * Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
-* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) +* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) -=== Examples +=== Examples -==== TLV (Ip option) anchor:IpvOption[] +==== TLV (Ip option) anchor:IpvOption[], value_based_next_class anchor:Value_Based_Next_Class[] IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] @@ -322,59 +329,55 @@ IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] 1 : Length 1 -other : Byte : Length ( +first) |option - +other : Byte : Length ( +first) |option + [source,python] ---- - class : "ip_option_131" - help : "ip_option" - fields : + gui_representation: + help : "ip_option" + fields : - name : "length" # tree with leaf of bits - help : "length" + gui_representation: + help : "length" type : uint8 properties : ["tlv"] # the length include the prev field size (8 byte) - name : "pointer" # tree with leaf of bits type : uint8 - + - name : "buffer" # tree with leaf of bits type : "tlv_reset" - class : "default_ip4_option_tlv" - help : "ip_option" - fields : + gui_representation: + help : "ip_option" + fields : - name : "length" # tree with leaf of bits - help : "length" + gui_representation: + help : "length" type : uint8 properties : "tlv" # the length include the prev field size (8 byte) - + - name : "buffer" # tree with leaf of bits type : "vlen_t" - + - class : "ip_option" - help : "ip_option" + gui_representation: + help : "ip_option" type : uint8 default : [0x01] - value_based_next_header : - fields: - - key : 0x00 - val : "end" # reserve name for ending the loop - - - key : 0x01 - val : "ip_option" # back to this header + value_based_next_class : + 0x00 : "end" # reserve name for ending the loop + 0x01 : "ip_option" # back to this header + 0x131 : "ip_option_131" + 0x0812: "gre" + default : "default_ip4_option_tlv" - - key : 0x131 - val : 
"ip_option_131" - - - key : 0x0812 - val : "gre" - - default : "default_ip4_option_tlv" - ---- @@ -389,151 +392,145 @@ other : Byte : Length ( +first) |option ---- - class : "c-mac-addr" - help : "Mac addrees" type : "uint8" - array_size : 6 + array_size : 6 default : [0x00, 0x00, 0x01, 0x00, 0x00, 0x00] - gui_representation: + gui_representation: data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + help : "Mac addrees" - class : "ethernet" - help : "Ethernet-L2" + gui_representation: + help : "Ethernet-L2" properties: ['external'] - fields : + fields : - name : "Dst" - help : "destination mac" + gui_representation: + help : "destination mac" type : "c-mac-addr" - + - name : "Src" - help : "source mac" + gui_representation: + help : "source mac" type : "c-mac-addr" - + - name: "Ethertype" - help: "Ethertype" + gui_representation: + help: "Ethertype" type: "uint16" default: [0x0800] - value_based_next_header : - fields : - - key : 0x0800 - val : "ip" - - - key : 0x86DD - val : "ipv6" - - - key : 0x8100 - val : "vlan" - - - key : 0x8847 - val : "mpls" #unicast + value_based_next_header : + 0x0800 : "ipv4" + 0x86DD : "ipv6" + 0x8100 : "vlan" + 0x8847 : "mpls" #unicast default : "payload" - + - class : "ipv4" - help : "Ipv4" - fields : + gui_representation: + help : "Ipv4" + fields : - name : "ver" - help : "Version" + gui_representation: + help : "Version" type : "bit" - array_size : 4 + array_size : 4 default : [0, 1, 0, 0] properties : ["const"] - name : "ihl" - help : "IHL" type : "bit" - array_size : 4 + array_size : 4 default : [0, 1, 1, 1] properties : ["ipv4_ihl"] gui_representation: + help : "IHL" form_type: "checkbox" .. 
- name : "hdr_chsum" - help : "Header Checksum" + gui_representation: + help : "Header Checksum" default : [0x00,0x00] properties : ["ipv4_check_sum", "const"] - name : "total_len" - help : "Total Length" + gui_representation: + help : "Total Length" default : [0x00,0x00] properties : ["ipv4_total_len", "const"] # auto calculate total_size-offset_header - + - name : "protocol" - help : "Protocol" type : uint8 default : 0x06 value_based_next_header : &ipv4_next_header - fields: - - key : 0x06 - val : "tcp" - - - key : 0x11 - val : "udp" - - - key : 0x29 - val : "ipv6" - - - key : 0x2F - val : "gre" - default : "payload" + 0x06 : "tcp" + 0x11 : "udp" + 0x29 : "ipv6" + 0x2F : "gre" + default : "payload" gui_representation: + help : "IPv4 next Protocol" form_type: "combo_without_edit" combobox_values: <<: *ipv4_next_header # take same choices as value_based_next_header - name : "src_addr" - help : "Source Address" type : uint32 default : [16, 0, 0, 0] - gui_representation: + gui_representation: + help : "Source Address" data_type : "ipv4" # reserve - name : "dst_addr" - help : "Destination Address" default : [48, 0, 0, 0] type : uint32 - gui_representation: + gui_representation: + help : "Destination Address" data_type : "ipv4" # reserve form_type : "combo_with_edit" combobox_values: - fields: - - key : [127, 0, 0, 1] - - value : "localhost" - - - key : [255, 255, 255, 255] - - value : "broadcast" + [127, 0, 0, 1]: 'localhost' + [255, 255, 255, 255]: 'broadcast' - class : "tcp" - help : "TCP" - properties : ["external"] - fields : + gui_representation: + help : "TCP" + properties : ["external"] + fields : - name : "src_port" - help : "Source Port" + gui_representation: + help : "Source Port" default : [0x30,0x00] type : uint16 - name : "dest_port" - help : "Source Port" + gui_representation: + help : "Source Port" default : [0x30,0x00] type : uint16 - name : "seq" - help : "Seq Number" + gui_representation: + help : "Seq Number" type : uint32 default : 
[0x30,0x00,00,00] - name : "ack" - help : "Ack Number" + gui_representation: + help : "Ack Number" type : uint32 default : [0x30,0x00,00,00] ... - + - name : "flags" # tree with leaf of bits - help : "Ack Number" + gui_representation: + help : "Ack Number" type : uint8 default : [0x30] fields : @@ -547,18 +544,20 @@ other : Byte : Length ( +first) |option type : bit default : [0x1] .. - + - name : "checksum" - help : "Checksum" + gui_representation: + help : "TCP Checksum" type : uint16 default : [0x00,0x00] properties : ["tcp_checksum"] # auto calculate total_size-offset_header - class : "root" # reserve - help : "Root" - next_headers : [ "ethrenet" ,"llc","_802-3"] ---------------------------- + gui_representation: + help : "Root" + next_headers : [ "ethrenet" ,"llc","_802-3"] +--------------------------- ==== Overide subfields example anchor:Overide_Subfields_Example[] @@ -569,43 +568,38 @@ In this example parent class default value overrides default values of sub-field ---- - class : "c-mac-addr" - help : "Mac addrees" type : "uint8" - array_size : 6 - gui_representation: + array_size : 6 + gui_representation: + help : "Mac addrees" data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} default : [0x00,0x00,0x01,0x00,0x00,0x00] - + - class : "ethernet" - help : "Ethernet-L2" - properties : ["external"] + gui_representation: + help : "Ethernet-L2" + properties : ["external"] default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . 
it is const size - fields : + fields : - name : "Dst" - help : "destination mac" + gui_representation: + help : "destination mac" type : "c-mac-addr" - + - name : "Src" - help : "source mac" + gui_representation: + help : "source mac" type : "c-mac-addr" - + - name : "ip_protocol" type : "uint16_t" default : [0x08,0x00] - value_based_next_header : - fields : - - key : 0x0800 - val : "ip" - - - key : 0x86DD - val : "ipv6" - - - key : 0x8100 - val : "vlan" - - - key : 0x8847 - val : "mpls unicast" + value_based_next_header : + 0x0800 : "ipv4" + 0x86DD : "ipv6" + 0x8100 : "vlan" + 0x8847 : "mpls unicast" default : "payload" ---- @@ -614,42 +608,34 @@ In this example parent class default value overrides default values of sub-field [source,python] ---- - name: 'Flags' - help: 'IPv4 Flags' type: 'bit' array_size: 3 - gui-representation: + gui_representation: + help: 'IPv4 Flags' form_type: 'checkbox' # can check each bit - name: 'dst_addr' - help: 'IPv4 Destination Address' default: [48, 0, 0, 0] type: uint32 - gui-representation: + gui_representation: + help: 'IPv4 Destination Address' data_type: 'ipv4_t' # special representation case, show as 4 decimal numbers form_type: 'combo_with_edit' # can choose from pre-defined values or edit manually combobox_values: - fields: - - key: [127, 0, 0, 1] - - val: 'localhost' - - - key: [255, 255, 255, 255] - - val: 'broadcast' + [127, 0, 0, 1]: 'localhost' + [255, 255, 255, 255]: 'broadcast' - name: 'protocol' - help: 'IPv4 Protocol Field' type: uint8 default: 0x06 value_based_next_header: &ipv4_next_header - fields: - - key: 0x06 - val: 'tcp' - - - key: 0x11 - val: 'udp' - default : "payload" - gui-representation: + 0x06: 'tcp' + 0x11: 'udp' + default : "payload" + gui_representation: + help: 'IPv4 Protocol Field' form_type: 'combo_without_edit' # choose from supported protocols, no manual edit combobox_values: <<: *ipv4_next_header # take same choices as value_based_next_header @@ -669,14 +655,16 @@ choosing from predefined 
values: image:images/combo_button_choosing.jpg[] -==== Union base +==== Union base TBD === Resource +* link:yaml/headers.yaml[headers.yaml] * link:https://wireedit.com/[WireEdit] * link:http://ostinato.org/[ostinato] * link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] + diff --git a/wscript b/wscript index 900f7294..3302ec58 100755 --- a/wscript +++ b/wscript @@ -139,6 +139,12 @@ def build(bld): bld(rule=my_copy, target=x) bld.add_group() + + for x in bld.path.ant_glob('yaml\\**\**.yaml'): + bld(rule=my_copy, target=x) + bld.add_group() + + for x in bld.path.ant_glob('video\\**\**.mp4'): bld(rule=my_copy, target=x) bld.add_group() diff --git a/yaml/headers.yaml b/yaml/headers.yaml new file mode 100755 index 00000000..d22eedc1 --- /dev/null +++ b/yaml/headers.yaml @@ -0,0 +1,429 @@ +# This yaml describes packet headers and their fields + + +####################################################################### +# Root, reserved starting point + + + - class: 'root' + gui_representation: + help: 'Root' + next_headers: ['ethernet', 'llc', '_802-3', 'ipv4'] + + +####################################################################### +# Protocols + + +# L2 + + + - class: 'ethernet' + gui_representation: + help: 'Ethernet-L2' + properties: ['external'] + default: [0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00] + fields: + - name: 'Dst' + gui_representation: + help: 'Destination MAC' + type: 'c-mac-addr' + default: [0x77, 0x55, 0x01, 0x00, 0x00, 0x01] + + - name: 'Src' + gui_representation: + help: 'Source MAC' + type: 'c-mac-addr' + + - name: 'Ethertype' + gui_representation: + help: 'Ethertype' + type: 'uint16' + default: [0x0800] + value_based_next_header: + 0x0800: 'ipv4' + # not implemented + # 0x0806: 'arp' + # 0x86DD: 'ipv6' + # 0x8100: 'vlan' # field of 4 bytes added in this case, what to do? + # 0x8847: 'mpls unicast' # unicast or multicast? 
multicast is 0x8848 + default: 'payload' + + +# L3 + + + - class: 'ipv4' + gui_representation: + help: 'IPv4' + properties: ['external'] + fields: + - name: 'ver' + gui_representation: + help: 'Version' + type: 'c-bit' + array_size: 4 + default: [0, 1, 0, 0] + properties: ['const'] + + - name: 'ihl' + gui_representation: + help: 'IHL' + type: 'c-bit' + array_size: 4 + default: [0, 1, 0, 1] + properties: ['ipv4_ihl'] + + - name: 'DSCP' + gui_representation: + help: 'Differentiated Services Code Point' + type: 'c-bit' + array_size: 6 + default: [0, 0, 0, 0, 0, 0] + + - name: 'ECN' + gui_representation: + help: 'Explicit Congestion Notification' + type: 'c-bit' + array_size: 2 + default: [0, 0] + + - name: 'total_len' + gui_representation: + help: 'Total Length' + type: 'uint8' + array_size: 2 + default: [0, 57] + properties: ['total_length'] + + - name: 'Identification' + gui_representation: + help: 'Identification' + type: 'uint8' + array_size: 2 + default: [0x00, 0x00] + + - name: 'Flags' + gui_representation: + help: 'IPv4 Flags' + type: 'c-bit' + array_size: 3 + default: [0, 0, 0] + fields: + - name: 'Reserved' + gui_representation: + help: 'Reserved' + type: 'bit' + default: 0 + properties: ['const'] + + - name: 'DF' + gui_representation: + help: "Don't Fragment" + type: 'bit' + default: 0 + + - name: 'MF' + gui_representation: + help: 'More Fragments' + type: 'bit' + default: 0 + + - name: 'Fragment Offset' + gui_representation: + help: 'Fragment Offset' + type: 'c-bit' + array_size: 13 + default: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + - name: 'TTL' + gui_representation: + help: 'Time To Live' + type: 'uint8' + default: 0x80 + + - name: 'protocol' + type: 'uint8' + default: 0x06 + value_based_next_header: &ipv4_next_header + 0x06: 'tcp' + 0x11: 'udp' + # not implemented + # 0x29: 'ipv6' + # 0x2F: 'gre' + default: 'payload' + gui_representation: + help: 'IPv4 next protocol field' + combobox_values: + <<: *ipv4_next_header + 0x11: 'Next header is UDP' # 
overrides the description for combo-box + + - name: 'ipv4_chksum' + gui_representation: + help: 'IPv4 Header Checksum' + type: 'uint16' + default: [0x0000] + properties: ['ipv4_checksum', 'const'] + + - name: 'Src' + gui_representation: + help: 'Source IPv4' + type: 'c-ipv4-addr' + + - name: 'Dst' + gui_representation: + help: 'Destination IPv4' + type: 'c-ipv4-addr' + + # not implemented + # - name: 'Opts' # presence of this field and it's size should be determined in python/java + # help: 'IPv4 options' + # type: 'c-ipv4-options' + + +# L4 + + + - class: 'tcp' + gui_representation: + help: 'TCP' + properties: ['external'] + next_headers: ['ipv6', 'ipv4'] + fields: + - name: 'src_port' + gui_representation: + help: 'Source Port' + type: 'c-port-16bit' + + - name: 'dest_port' + gui_representation: + help: 'Destination Port' + type: 'c-port-16bit' + + - name: 'Seq' + gui_representation: + help: 'Sequence number' + type: uint32 + default: 0x30000000 + + - name: 'Ack' + gui_representation: + help: 'Acknowledgment number' + type: uint32 + default: 0x30000000 + + - name: 'Data offset' + gui_representation: + help: 'Data offset' + type: 'c-bit' + array_size: 4 + default: [0, 1, 0, 1] + + - name: 'Reserved' + gui_representation: + help: 'TCP Reserved' + type: 'bit' + array_size: 3 + default: [0, 0, 0] + properties: ['const'] + + - name: 'flags' # tree with leaf of bits + gui_representation: + help: 'TCP flags' + type: 'c-bit' + array_size: 9 + default: [0, 0, 0, 0, 0, 0, 0, 0, 0] + fields: + - name: 'ns' + gui_representation: + help: 'NS flag' + type: 'bit' + default: 0 + + - name: 'cwr' + gui_representation: + help: 'CWR flag' + type: 'bit' + default: 0 + + - name: 'ece' + gui_representation: + help: 'ECE flag' + type: 'bit' + default: 0 + + - name: 'urg' + gui_representation: + help: 'URG flag' + type: 'bit' + default: 0 + + - name: 'ack' + gui_representation: + help: 'ACK flag' + type: 'bit' + default: 0 + + - name: 'psh' + gui_representation: + help: 'PSH flag' + 
type: 'bit' + default: 0 + + - name: 'rst' + gui_representation: + help: 'RST flag' + type: 'bit' + default: 0 + + - name: 'syn' + gui_representation: + help: 'SYN flag' + type: 'bit' + default: 0 + + - name: 'window_size' + gui_representation: + help: 'Window size' + type: uint16 + default: 0x0000 + + - name: 'checksum' + gui_representation: + help: 'Checksum' + type: uint16 + default: 0x0000 + properties: ['tcp_checksum'] + + - name: 'urgent_pointer' + gui_representation: + help: 'Urgent pointer' + type: uint16 + default: 0x0000 + + # not implemented + # - name: 'tcp options' + # type: 'c-tcp-options' + + + - class: 'udp' + gui_representation: + help: 'UDP' + properties: ['external'] + next_headers: ['ipv6', 'ipv4'] + fields: + - name: 'src_port' + gui_representation: + help: 'Source Port' + type: 'c-port-16bit' + + - name: 'dest_port' + gui_representation: + help: 'Destination Port' + type: 'c-port-16bit' + + - name: 'length' + gui_representation: + help: 'Length' + type: 'uint16' + default: 0x0020 + properties: ['total_length'] + + - name: 'checksum' + gui_representation: + help: 'UDP Checksum' + type: uint16 + default: 0x0000 + properties: ['udp_checksum'] + + +####################################################################### +# Fields + + + - class: 'c-mac-addr' + type: 'uint8' + array_size: 6 + default: [0x00, 0x00, 0x01, 0x00, 0x00, 0x00] + gui_representation: + help: 'MAC address' + data_type: 'mac-addr_t' # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + form_type: 'combo_with_edit' + + + - class: 'c-ipv4-addr' + type: 'uint8' + array_size: 4 + default: [1, 1, 1, 1] + gui_representation: + help: 'IPv4 address' + data_type: 'ipv4_t' + form_type: 'combo_with_edit' + + + - class: 'c-port-16bit' + type: 'uint16' + default: 0x1234 + gui_representation: + help: 'ports of TCP/UDP etc.' 
+ form_type: 'combo_with_edit' + + + - class: 'c-bit' + type: 'bit' + gui_representation: + help: 'bit with checkbox representation' + form_type: 'checkbox' + + +###################### +# incomplete + + - class: 'c-ipv4-option' + type: 'vlen_t' + default: 0x01 + fields: + - name: 'copied flag' + help: 'Copy the option to all fragments flag' + type: 'c-bit' + default: 0 + + - name: 'option class' + help: '0 = controls, 2 = debugging' + type: 'bit' + array_size: 2 + default: 0 + + - name: 'option number' + help: 'Option Number' + type: 'bit' + array_size: 5 + value_based_next_class: + 0: 'end' # reserved name for ending options + 1: 'ip_option' # back to itself + 2: 'c-ipv4opt-security' + 3: 'c-ipv4opt-loose-source-routing' + 4: 'c-ipv4opt-internet-timestamp' + 7: 'c-ipv4opt-record-route' + 8: 'c-ipv4opt-stream-id' + 9: 'c-ipv4opt-strict-source-routing' + default: 'end' + + + - class: 'c-ipv4opt-security' + help: 'ipv4 option security' + type: 'bit' + array_size: 11 + default: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + fields: + value_based_next_class: + default: 'c-ipv4-option' + + - class: 'c-ipv4opt-loose-source-routing' + help: 'ipv4 option loose source routing' + type: 'bit' + default: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + fields: + value_based_next_class: + default: 'c-ipv4-option' + -- cgit 1.2.3-korg From abe95553daeb657d6f4465dc8252c8caac01739f Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Tue, 29 Sep 2015 07:01:33 +0300 Subject: fix typo --- yaml/headers.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yaml/headers.yaml b/yaml/headers.yaml index d22eedc1..fd217dc6 100755 --- a/yaml/headers.yaml +++ b/yaml/headers.yaml @@ -8,7 +8,7 @@ - class: 'root' gui_representation: help: 'Root' - next_headers: ['ethernet', 'llc', '_802-3', 'ipv4'] + next_headers: ['ethernet', 'llc', '_802-3'] ####################################################################### @@ -39,7 +39,7 @@ gui_representation: help: 'Ethertype' type: 'uint16' - default: 
[0x0800] + default: 0x0800 value_based_next_header: 0x0800: 'ipv4' # not implemented -- cgit 1.2.3-korg From 2b6dfe601ebcbe9eb890f8957ef5f1095299ff9b Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Wed, 30 Sep 2015 00:13:40 +0300 Subject: 1. updated with Higher Level implementation examples 2. added changelog to the file --- trex_rpc_server_spec.asciidoc | 209 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 208 insertions(+), 1 deletion(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 94a95bfb..e973aedc 100644 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -2,12 +2,25 @@ The TRex RPC Server =================== :author: Itay Marom :email: -:revnumber: 1.70-0.0 +:revnumber: 1.01 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex +== Change log + +[options="header",cols="^1,^h,3a"] +|================= +| Version | name | meaning +| 1.00 | Itay Marom (imarom) | +- first version +| 1.01 | Dan Klein (danklei) +| +- added usage examples using Python code as Higher-level usage +- added logic and explanation behind VM commands + +|================= == RPC Support On TRex @@ -1084,3 +1097,197 @@ Describes the log off from the machine * *remove_all_streams* ['optional'] - if you want to clear all the previous streams - use this * *release* - release the ownership over the device + +== Higher Level implementation examples +The following examples represents common scenarios implemented by a higher layer, which uses the API described above. + +The examples are written in Python, however similar examples can be shown in any programming language. + +=== CTRexPktBuilder class description +`CTRexPktBuilder` is a Python module designed to provide a progammer API for dynamic packet building. +Since the packet is built to be used by TRex, a `CTRexVM` subclass has been created to describe how TRex should use the described packet in its transmission. 
+ +While the entire `CTRexPktBuilder` class (which is initialized by specifying the total length of the packet) responsible to both building the packet layer by layer, the `CTRexVM` class is responsible for controlling the ranging of the values as desribed in the <>, and other attributes being used by TRex data-plane once the server receives its streams. + + +=== Creating an example packet +The following conde snippet describes how an ICMP Echo packet is built. + +[source, python, numbered] +---- +from packet_builder import CTRexPktBuilder +import dpkt + +pkt_bld = CTRexPktBuilder() # <1> +pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet()) +# set Ethernet layer attributes +pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3") +pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22") +pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP) +# set IP layer attributes +pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP()) +pkt_bld.set_ip_layer_addr("l3_ip", "src", "21.0.0.2") +pkt_bld.set_ip_layer_addr("l3_ip", "dst", "22.0.0.12") +pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_ICMP) +# set ICMP layer attributes +pkt_bld.add_pkt_layer("icmp", dpkt.icmp.ICMP()) +pkt_bld.set_layer_attr("icmp", "type", dpkt.icmp.ICMP_ECHO) +# set Echo(ICMP) layer attributes +pkt_bld.add_pkt_layer("icmp_echo", dpkt.icmp.ICMP.Echo()) +pkt_bld.set_layer_attr("icmp_echo", "id", 24528) +pkt_bld.set_layer_attr("icmp_echo", "seq", 11482) +pkt_bld.set_pkt_payload('hello world') +# finally, set IP header len with relation to payload data +pkt_bld.set_layer_attr("l3_ip", "len", len(pkt_bld.get_layer('l3_ip'))) +---- + +<1> Initialize the packet builder instance. + +This example created a packet without any ranging to it, so in this case TRex is expected to reply the same packet over and over without any changes to it. 
+ +When adding sending this packet as part of the <<_add_stream, Add Stream>> command, the packet content specified under `packet` would look for the created ICMP packet like this: + +[source, python] +---- +>>> print pkt_bld.dump_pkt() + [224, 95, 185, 105, 233, 34, 0, 21, 23, 167, 117, 163, 8, 0, 69, 0, 0, 39, + 0, 0, 0, 0, 64, 1, 79, 201, 21, 0, 0, 2, 22, 0, 0, 12, 8, 0, 217, 134, 95, + 208, 44, 218, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] +---- + +Each of the array items representing a byte data-representation, hence ranging from 0 to 255. + +=== Create a packet with single ranging instruction +The following example creates an HTTP GET packet, hence layering Ethernet/IP/TCP/HTTP. + +[source, python, numbered] +---- +from packet_builder import CTRexPktBuilder +import dpkt + +pkt_bld = CTRexPktBuilder() +pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet()) +# set Ethernet layer attributes +pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3") +pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22") +pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP) +# set IP layer attributes +pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP()) +pkt_bld.set_ip_layer_addr("l3_ip", "src", "21.0.0.2") +pkt_bld.set_ip_layer_addr("l3_ip", "dst", "22.0.0.12") +pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_TCP) +# set TCP layer attributes +pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP()) +pkt_bld.set_layer_attr("l4_tcp", "sport", 13311) +pkt_bld.set_layer_attr("l4_tcp", "dport", 80) +pkt_bld.set_layer_attr("l4_tcp", "flags", 0) +pkt_bld.set_layer_attr("l4_tcp", "win", 32768) +pkt_bld.set_layer_attr("l4_tcp", "seq", 0) +# set packet payload, for example HTTP GET request +pkt_bld.set_pkt_payload('GET /10k_60k HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, 
compress\r\n\r\n') + +# finally, set IP header len with relation to payload data +pkt_bld.set_layer_attr("l3_ip", "len", len(pkt_bld.get_layer('l3_ip'))) +---- + +Now, we extened the single packet created with three VM instructions, in order to range over the source IP of the packet. + +[source, python, numbered] +---- +pkt_bld.set_vm_ip_range(ip_layer_name="l3_ip", # <1> + ip_field="src", # <2> + ip_init="10.0.0.1", ip_start="10.0.0.1", ip_end="10.0.0.255", + add_value=1, + operation="inc") +---- + +<1> `l3_ip` corresponds with the layer name given to the IP layer of the packet. This helps identifying and diffrenciate in packet containing more than one IP header. + +<2> the name of the field on which we want to range. + +Now, we added ranging for source IP starting from 10.0.0.1 to 10.0.0.255. +This will generate the follwing VM instructions, which will be provided under `vm` field of the <<_add_stream, add_stream>> command: + +[source, python] +---- +>>> print pkt_bld.vm.dump(), + [{'name': 'l3__src', 'ins_name': 'flow_var', 'max_value': '167772415', 'min_value': '167772161', 'init_value': '167772161', 'size': 4, 'op': 'inc'}, + {'is_big_endian': False, 'pkt_offset': 26, 'type': 'write_flow_var', 'name': 'l3__src', 'add_value': 1}, + {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}] +---- + +As we can see, three instructions has been generated for this ranging criteria: + +1. `flow_var` instruction - for defining the ranging parameters. + +2. `write_flow_var` instruction - for specifying where and how the modification should take place. + +3. `fix_checksum_ipv4` instruction - for updated the checksum field + +[WARNING] +The order of the instruction **does matter**. In this example, if the `fix_checksum_ipv4` instruction would have been places prior to the `write_flow_var` instruction, the generated packet would have satyed with the old checksum values. + +[NOTE] +By default, with each change to the IP header, a `fix_checksum_ipv4` instruction is added. 
This can be canceled by passing `add_checksum_inst=False` in functions which ranges over an IP field. + + +=== Create a packet with multiple ranging instructions +Now, we shall extend our ranging and add another field to range on, this time we'll pick the TOS field of the IP header. + +So, we'll add the following code snippet **ontop of the ranging method we already applied**: + +[source, python, numbered] +---- +pkt_bld.set_vm_custom_range(layer_name="l3_ip", + hdr_field="tos", + init_val="10", start_val="10", end_val="200", add_val=2, val_size=1, + operation="inc") +---- + +So, in this case we chose to range the TOS field from 10 to 200 in steps of 2. + +Finally, let's see the expected JSON output of the VM instructions: + +[source, python] +---- +>>> print pkt_bld.vm.dump() + [{ 'init_value': '167772161', # <1> + 'ins_name': 'flow_var', + 'max_value': '167772415', + 'min_value': '167772161', + 'name': 'l3__src', + 'op': 'inc', + 'size': 4}, + { 'init_value': '10', # <2> + 'ins_name': 'flow_var', + 'max_value': '200', + 'min_value': '10', + 'name': 'l3__tos', + 'op': 'inc', + 'size': 1}, + { 'add_value': 2, # <3> + 'is_big_endian': False, + 'name': 'l3__tos', + 'pkt_offset': 15, + 'type': 'write_flow_var'}, + { 'add_value': 1, # <4> + 'is_big_endian': False, + 'name': 'l3__src', + 'pkt_offset': 26, + 'type': 'write_flow_var'}, + { 'pkt_offset': 14, 'type': 'fix_checksum_ipv4'} # <5> + ] +---- + +<1> `flow_var` instruction for source IP. + +<2> `flow_var` instruction for TOS field + +<3> `write_flow_var` instruction for TOS. + +<4> `write_flow_var` instruction for source IP. + +<5> `fix_checksum_ipv4` instruction for both ranging options + +[NOTE] +In this case only one checksum instruction has been generated, since both ranging options applies to the same IP header. 
\ No newline at end of file -- cgit 1.2.3-korg From 80282f6f9fb9798b92a581b3963be56e54f5981d Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 1 Oct 2015 00:29:27 +0300 Subject: fix EOL style --- packet_builder_yaml.asciidoc | 1340 +++++++++++++++++++++--------------------- yaml/headers.yaml | 79 ++- 2 files changed, 745 insertions(+), 674 deletions(-) diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 5d824a58..d52ee4c8 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -1,670 +1,670 @@ -Packet Builder Language -======================= -:author: hhaim -:email: -:revnumber: 0.04 -:quotes.++: -:numbered: - -== change log - -[options="header",cols="^1,^h,a"] -|================= -| Version | name | meaning -| 0.01 | hhaim | -- first version -| 0.02 | hhaim -| -- change the bool fields to properties -- add external/internal property -- add const property ( instead cant_change) -- change TLV property - now learn the prev header -- add choice of next protocol that is not base on a field ( TCP->IP->TCP) -| 0.03 | ybrustin -| -- add MAC address regexp -- add gui_representation class with data_type, form_type, combobox_values, data_type_regexp items to describe GUI view of field -- rename choice attribute to value_based_next_header -- fixed some typos -| 0.04 | ybrustin -| -- change value_based_next_header, combobox_values (to be consistent with value_based_next_header) to dictionary -- added value_based_next_class for options -- move 'help' attribute to gui_representation -- add link to headers.yaml (references at bottom of the page) - -|================= - - -== A file format for GUI packet builder - -=== Introduction - -We would like a file that will be read by GUI and will give us the ability to build packets using GUI - -The format should be *YAML* - - -=== High Level Requirement - -* Define a YAML object format for dynamic building of packets and a program that change various fields -* Ability to 
*parse* back the same buffer that was created using this tool (reversibility) -** Ability to load packet from a pcap file and parse it -* Ability to save the packet to a pcap file -* Ability to save the packet and program in JSON format (same JSON-RPC format) -* Set a value for any field of any protocol -* Vary packet fields across packets at run time e.g. changing IP/MAC addresses -* Stack protocols in any arbitrary order define in YAML format - -=== Header that should be supported (first phase) - -==== L2 - -* Ethernet -* 802.3 -* LLC SNAP -* VLAN (with QinQ) stack -* MPLS stack - -==== L3 - -* ARP -* IPv4 -* IPv6 (4x header) -* IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) - -==== L4 - -* TCP -* UDP -* ICMPv4 -* ICMPv6 -* IGMP - -==== L7 anchor:Payload[] - -* Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) -** random string -** repeat string - -* Pattern Binary -** repeat of value (e.g 0x55) -** random -** seq (1,2,3,3,4) -** User Hex Dump editor - - -=== YAML Format - -==== Header section - -.Default Types anchor:Types[] -[options="header",cols="1,2,3"] -|================= -| Field Name | meaning | size in bits -| bit | describe the header object e.g tcp | 1 -| uint8 | describe the header object e.g tcp | 8 -| uint16 | the name in the GUI | 16 -| uint32 | sub fields of this header | 32 -| uint64 | sub fields of this header | 64 -| other class type | name of other class. 
for example, "c-mac-addr"; take fields from there, optionally overload them later | The size taken from that class -| Payload | xref:Payload[Payload] | total packet size - all header until now -| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object -|================= - - -.Default Data_Type anchor:Data_Type[] -[options="header",cols="1,2"] -|================= -| Field Name | meaning -| none | use Hex Editor as Types -| ipv4_t | 4 decimals 0-255 each -| mac_addr_t | ([0-9a-fA-F]\{2\}:)\{5\}[0-9a-fA-F]\{2\} -| ipv4_mask_t | should match uint32 type -| ipv6_t | should have 16 bytes field size 8x16 -| ipv6_mask_t | should have 16 bytes field size 8x16 -| another header class | sub fields of this header -| char_t | array of bytes , look into the array_size of cost string -| var_char_t | array based on a field value look into -| regexp_t | define a Java function that converts a reg exp string to a buffer see here xref:GenRegExp[RegExp] -|================= - -.Default Form_Type anchor:Form_Type[] -[options="header",cols="1,3"] -|================= -| Field Name | meaning -| none | simple editing field -| combo_with_edit | combo box with predefined choices, can edit the field value manually -| combo_without_edit | combo box with predefined choices, can [underline]#not# edit the field value manually -| checkbox | toggle bits values, if item is array of bits, display several checkboxes per number of bits -|================= - - -.Default Gui_Representation anchor:Gui_Representation[] -[options="header",cols="1,^1,5,^1,10"] -|================= -| Field Name | value type | meaning | Link | Additional info -| help | string | the name in the GUI | | -| data_type | string | how to represent data | xref:Data_Type[Data_Type] | data_type could get data_type_regexp e.g data_type = "ipv4"; data_type = "regexp" data_type_regexp = "string that define regexp and Java function" -| 
form_type | string | which editing form to use | xref:Form_Type[Form_Type] | for example for ip address use combobox with option to edit value manually or choose: key "localhost" value "127.0.0.1" etc. -| combobox_values | dictionary | pairs of 'key - value' for combo_with/without_edit | | -| data_type_regexp | string | in case it is reg_exp the name of the function | xref:GenRegExp[GenRegExp] | -|================= - - -.Default Properties anchor:Properties[] -[options="header",cols="1,7"] -|================= -| Field Name | meaning -| ipv4_checksum | auto calculates checksum on this header Ipv4 type -| tcp_checksum | calculate TCP checksum -| udp_checksum | calculate UDP checksum -| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet -| tlv | TLV length of the header (inlcudes the prev field length) example ip-option, tcp-option -| le | little endian. deault is big -| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field -| external | marks the header as an external header for the GUI. 
for example IPv4 is external header and mac-addr is internal header ( compose external header) -|================= - - -.Field_Type anchor:Field_Type[] -[options="header",cols="1,^1,30,^1,^1,30"] -|================= -| Field Name | value type | meaning | Default Value | Link | Example -| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp -| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | name : tcp -| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr -| type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" -| gui_representation | dictionary | description of how to view/edit data in GUI | | xref:Gui_Representation[Gui_Representation] | xref:Gui_Representation_Example[Gui_Representation_Example] -| default | array/value | default value in the packets , you can override value for subfields in parent see example -| [0 ]x header size | | xref:Overide_Subfields_Example[Overide_Subfields_Example] -| properies | array of string like masks -| properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] -| value_based_next_header | dictionary | define the next protocol based on a field value | none | xref:Value_Based_Next_Header[Value_Based_Next_Header] | -| value_based_next_class | dictionary | define the next class based on a field value (useful for options) | none | xref:Value_Based_Next_Class[Value_Based_Next_Class] | -| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Next_headers[Next_headers] | -| fields | array | array of Field_Type | [] | | fields : [ ] -| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | -| option | string | a java code that define a way 
to calculate varible size | "none" | | | -|================= - - -.Field_Type anchor:ConstHeadesClass[] -[options="header",cols="^1,^10"] -|================= -| Field Name | value type -| "root" | the root pointer to the start of blocks L2/802.3 etc -| "end" | end TLV headers -| "payload" | the rest of the packets as buffer/string etc -|================= - - -.Next_headers anchor:Next_headers[] -Example of Next_headers -[source,python] ----- - - - class : "next-example-t-1" - gui_representation: - help : "next-example-t-1" - next_headers : ["ipv4", "ipv6, "tcp"] - -# option 1 define in the header itself - - class : "tcp" - gui_representation: - help : "TCP header" - properies : ["external"] - next_headers : ["ipv4", "ipv6, "tcp"] - fields : - - name : "ver" - -# option 2 define throw a class - - class : "tcp" - gui_representation: - help : "TCP header" - properies : ["external"] - next_headers : "next-example-t-1" # - fields : - - name : "ver" ----- - - -.Value_Based_Next_Header anchor:Value_Based_Next_Header[] -Example of value_based_next_header -[source,python] ----- - value_based_next_header: - 0x0800: 'ipv4'# name of an external or internal class , the GUI should distinct betwean internal and external - 0x0806: 'arp' - 0x86DD: 'ipv6' - 0x8100: 'vlan' - 0x8847: 'mpls unicast' - default: 'payload' # if no match for any of above - ----- - - -.Generic RegExp Edit Field anchor:GenRegExp[] - -This will define a regexp that match for user input and how to converts it to buffer of bytes - -[source,python] ----- - -class MyClass : public RegExpBase { - public: - - - string get_reg_exp_string( ) { - return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) - } - - # in case of match - buffer get_buffer(){ - g= [get_group()[1].to_int()*256,get_group()[1].to_int()] - # return list - return (g) - } - -} - ----- - - - -==== Relations between object headers - -There would be a root object to point to possible starting headers - - -[source,python] ----- - -- class : "root" 
- gui_representation: - help : "Root" - next_headers : [ "ethernet", "llc", "_802-3"] ----- - -So in a way you could define a tree like this - -[source,python] ----- - -root -> L2 ( Ethernet , 802.3 , LLC SNAP ) - |( by field ) - | - ------------------------------------- ( VLAN (with QinQ), MPLS , ipv4, ipv6, ARP , ICMP ) - | | | | - | ipv4/ipv6 - - - | | - | | - [Possibility - Ethernet/802.3/LLC SNAP) | UDP/TCP/Pyload - Object | | - for each option there tree of all the option --- - ----- - - -==== Rules - -* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) -* It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI -* The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size -* "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) -* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types - for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. -* Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
-* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) - - -=== Examples - - -==== TLV (Ip option) anchor:IpvOption[], value_based_next_class anchor:Value_Based_Next_Class[] - - -IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] - -0 : END - -1 : Length 1 - -other : Byte : Length ( +first) |option - - - -[source,python] ----- - - - class : "ip_option_131" - gui_representation: - help : "ip_option" - fields : - - name : "length" # tree with leaf of bits - gui_representation: - help : "length" - type : uint8 - properties : ["tlv"] # the length include the prev field size (8 byte) - - - name : "pointer" # tree with leaf of bits - type : uint8 - - - name : "buffer" # tree with leaf of bits - type : "tlv_reset" - - - class : "default_ip4_option_tlv" - gui_representation: - help : "ip_option" - fields : - - name : "length" # tree with leaf of bits - gui_representation: - help : "length" - type : uint8 - properties : "tlv" # the length include the prev field size (8 byte) - - - name : "buffer" # tree with leaf of bits - type : "vlen_t" - - - - class : "ip_option" - gui_representation: - help : "ip_option" - type : uint8 - default : [0x01] - value_based_next_class : - 0x00 : "end" # reserve name for ending the loop - 0x01 : "ip_option" # back to this header - 0x131 : "ip_option_131" - 0x0812: "gre" - default : "default_ip4_option_tlv" - - ----- - -* case of varible length field ip_option example - - - -==== Example TCP/IP - - -[source,python] ----- - - - class : "c-mac-addr" - type : "uint8" - array_size : 6 - default : [0x00, 0x00, 0x01, 0x00, 0x00, 0x00] - gui_representation: - data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} - help : "Mac addrees" - - - - class : "ethernet" - gui_representation: - help : "Ethernet-L2" - properties: ['external'] - fields : - - name : "Dst" - gui_representation: - help : "destination mac" - type : "c-mac-addr" - - - name : 
"Src" - gui_representation: - help : "source mac" - type : "c-mac-addr" - - - name: "Ethertype" - gui_representation: - help: "Ethertype" - type: "uint16" - default: [0x0800] - value_based_next_header : - 0x0800 : "ipv4" - 0x86DD : "ipv6" - 0x8100 : "vlan" - 0x8847 : "mpls" #unicast - default : "payload" - - - - class : "ipv4" - gui_representation: - help : "Ipv4" - fields : - - name : "ver" - gui_representation: - help : "Version" - type : "bit" - array_size : 4 - default : [0, 1, 0, 0] - properties : ["const"] - - - name : "ihl" - type : "bit" - array_size : 4 - default : [0, 1, 1, 1] - properties : ["ipv4_ihl"] - gui_representation: - help : "IHL" - form_type: "checkbox" - - .. - - - name : "hdr_chsum" - gui_representation: - help : "Header Checksum" - default : [0x00,0x00] - properties : ["ipv4_check_sum", "const"] - - - name : "total_len" - gui_representation: - help : "Total Length" - default : [0x00,0x00] - properties : ["ipv4_total_len", "const"] # auto calculate total_size-offset_header - - - name : "protocol" - type : uint8 - default : 0x06 - value_based_next_header : &ipv4_next_header - 0x06 : "tcp" - 0x11 : "udp" - 0x29 : "ipv6" - 0x2F : "gre" - default : "payload" - gui_representation: - help : "IPv4 next Protocol" - form_type: "combo_without_edit" - combobox_values: - <<: *ipv4_next_header # take same choices as value_based_next_header - - - name : "src_addr" - type : uint32 - default : [16, 0, 0, 0] - gui_representation: - help : "Source Address" - data_type : "ipv4" # reserve - - - name : "dst_addr" - default : [48, 0, 0, 0] - type : uint32 - gui_representation: - help : "Destination Address" - data_type : "ipv4" # reserve - form_type : "combo_with_edit" - combobox_values: - [127, 0, 0, 1]: 'localhost' - [255, 255, 255, 255]: 'broadcast' - - - - class : "tcp" - gui_representation: - help : "TCP" - properties : ["external"] - fields : - - name : "src_port" - gui_representation: - help : "Source Port" - default : [0x30,0x00] - type : uint16 - - - name 
: "dest_port" - gui_representation: - help : "Source Port" - default : [0x30,0x00] - type : uint16 - - - name : "seq" - gui_representation: - help : "Seq Number" - type : uint32 - default : [0x30,0x00,00,00] - - - name : "ack" - gui_representation: - help : "Ack Number" - type : uint32 - default : [0x30,0x00,00,00] - - ... - - - name : "flags" # tree with leaf of bits - gui_representation: - help : "Ack Number" - type : uint8 - default : [0x30] - fields : - - name : "urg" - help : "URG" - type : bit - default : [0x0] - - - name : "ack" - help : "ACK" - type : bit - default : [0x1] - .. - - - name : "checksum" - gui_representation: - help : "TCP Checksum" - type : uint16 - default : [0x00,0x00] - properties : ["tcp_checksum"] # auto calculate total_size-offset_header - - -- class : "root" # reserve - gui_representation: - help : "Root" - next_headers : [ "ethrenet" ,"llc","_802-3"] ---------------------------- - - -==== Overide subfields example anchor:Overide_Subfields_Example[] - -In this example parent class default value overrides default values of sub-fields ( 2 different mac-addr) - -[source,python] ----- - - - class : "c-mac-addr" - type : "uint8" - array_size : 6 - gui_representation: - help : "Mac addrees" - data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} - default : [0x00,0x00,0x01,0x00,0x00,0x00] - - - - class : "ethernet" - gui_representation: - help : "Ethernet-L2" - properties : ["external"] - default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . 
it is const size - fields : - - name : "Dst" - gui_representation: - help : "destination mac" - type : "c-mac-addr" - - - name : "Src" - gui_representation: - help : "source mac" - type : "c-mac-addr" - - - name : "ip_protocol" - type : "uint16_t" - default : [0x08,0x00] - value_based_next_header : - 0x0800 : "ipv4" - 0x86DD : "ipv6" - 0x8100 : "vlan" - 0x8847 : "mpls unicast" - default : "payload" ----- - -==== Gui Representation example anchor:Gui_Representation_Example[] -[underline]#In YAML:# -[source,python] ----- - - name: 'Flags' - type: 'bit' - array_size: 3 - gui_representation: - help: 'IPv4 Flags' - form_type: 'checkbox' # can check each bit - - - - name: 'dst_addr' - default: [48, 0, 0, 0] - type: uint32 - gui_representation: - help: 'IPv4 Destination Address' - data_type: 'ipv4_t' # special representation case, show as 4 decimal numbers - form_type: 'combo_with_edit' # can choose from pre-defined values or edit manually - combobox_values: - [127, 0, 0, 1]: 'localhost' - [255, 255, 255, 255]: 'broadcast' - - - - name: 'protocol' - type: uint8 - default: 0x06 - value_based_next_header: &ipv4_next_header - 0x06: 'tcp' - 0x11: 'udp' - default : "payload" - gui_representation: - help: 'IPv4 Protocol Field' - form_type: 'combo_without_edit' # choose from supported protocols, no manual edit - combobox_values: - <<: *ipv4_next_header # take same choices as value_based_next_header ----- - -[underline]#In GUI:# - -checkbox for bits: - -image:images/checkbox.jpg[] - -editing in combo-box: - -image:images/combo_button_editing.jpg[] - -choosing from predefined values: - -image:images/combo_button_choosing.jpg[] - -==== Union base - -TBD - - - -=== Resource -* link:yaml/headers.yaml[headers.yaml] -* link:https://wireedit.com/[WireEdit] -* link:http://ostinato.org/[ostinato] -* link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] - - +Packet Builder Language +======================= +:author: hhaim +:email: +:revnumber: 0.04 +:quotes.++: +:numbered: + +== 
change log + +[options="header",cols="^1,^h,a"] +|================= +| Version | name | meaning +| 0.01 | hhaim | +- first version +| 0.02 | hhaim +| +- change the bool fields to properties +- add external/internal property +- add const property ( instead cant_change) +- change TLV property - now learn the prev header +- add choice of next protocol that is not base on a field ( TCP->IP->TCP) +| 0.03 | ybrustin +| +- add MAC address regexp +- add gui_representation class with data_type, form_type, combobox_values, data_type_regexp items to describe GUI view of field +- rename choice attribute to value_based_next_header +- fixed some typos +| 0.04 | ybrustin +| +- change value_based_next_header, combobox_values (to be consistent with value_based_next_header) to dictionary +- added value_based_next_class for options +- move 'help' attribute to gui_representation +- add link to headers.yaml (references at bottom of the page) + +|================= + + +== A file format for GUI packet builder + +=== Introduction + +We would like a file that will be read by GUI and will give us the ability to build packets using GUI + +The format should be *YAML* + + +=== High Level Requirement + +* Define a YAML object format for dynamic building of packets and a program that change various fields +* Ability to *parse* back the same buffer that was created using this tool (reversibility) +** Ability to load packet from a pcap file and parse it +* Ability to save the packet to a pcap file +* Ability to save the packet and program in JSON format (same JSON-RPC format) +* Set a value for any field of any protocol +* Vary packet fields across packets at run time e.g. 
changing IP/MAC addresses +* Stack protocols in any arbitrary order define in YAML format + +=== Header that should be supported (first phase) + +==== L2 + +* Ethernet +* 802.3 +* LLC SNAP +* VLAN (with QinQ) stack +* MPLS stack + +==== L3 + +* ARP +* IPv4 +* IPv6 (4x header) +* IP-in-IP a.k.a IP Tunnelling (6over4, 4over6, 4over4, 6over6) + +==== L4 + +* TCP +* UDP +* ICMPv4 +* ICMPv6 +* IGMP + +==== L7 anchor:Payload[] + +* Any text based protocol (HTTP, SIP, RTSP, NNTP etc.) +** random string +** repeat string + +* Pattern Binary +** repeat of value (e.g 0x55) +** random +** seq (1,2,3,3,4) +** User Hex Dump editor + + +=== YAML Format + +==== Header section + +.Default Types anchor:Types[] +[options="header",cols="1,2,3"] +|================= +| Field Name | meaning | size in bits +| bit | describe the header object e.g tcp | 1 +| uint8 | describe the header object e.g tcp | 8 +| uint16 | the name in the GUI | 16 +| uint32 | sub fields of this header | 32 +| uint64 | sub fields of this header | 64 +| other class type | name of other class. 
for example, "c-mac-addr"; take fields from there, optionally overload them later | The size taken from that class +| Payload | xref:Payload[Payload] | total packet size - all header until now +| vlen_t | in case of varible size header this include the size to the end of varible size header see example xref:IpvOption[Ipv4Option] |total size of the object +|================= + + +.Default Data_Type anchor:Data_Type[] +[options="header",cols="1,2"] +|================= +| Field Name | meaning +| none | use Hex Editor as Types +| ipv4_t | 4 decimals 0-255 each +| mac_addr_t | ([0-9a-fA-F]\{2\}:)\{5\}[0-9a-fA-F]\{2\} +| ipv4_mask_t | should match uint32 type +| ipv6_t | should have 16 bytes field size 8x16 +| ipv6_mask_t | should have 16 bytes field size 8x16 +| another header class | sub fields of this header +| char_t | array of bytes , look into the array_size of cost string +| var_char_t | array based on a field value look into +| regexp_t | define a Java function that converts a reg exp string to a buffer see here xref:GenRegExp[RegExp] +|================= + +.Default Form_Type anchor:Form_Type[] +[options="header",cols="1,3"] +|================= +| Field Name | meaning +| none | simple editing field +| combo_with_edit | combo box with predefined choices, can edit the field value manually +| combo_without_edit | combo box with predefined choices, can [underline]#not# edit the field value manually +| checkbox | toggle bits values, if item is array of bits, display several checkboxes per number of bits +|================= + + +.Default Gui_Representation anchor:Gui_Representation[] +[options="header",cols="1,^1,5,^1,10"] +|================= +| Field Name | value type | meaning | Link | Additional info +| help | string | the name in the GUI | | +| data_type | string | how to represent data | xref:Data_Type[Data_Type] | data_type could get data_type_regexp e.g data_type = "ipv4"; data_type = "regexp" data_type_regexp = "string that define regexp and Java function" +| 
form_type | string | which editing form to use | xref:Form_Type[Form_Type] | for example for ip address use combobox with option to edit value manually or choose: key "localhost" value "127.0.0.1" etc. +| combobox_values | dictionary | pairs of 'key - value' for combo_with/without_edit | | +| data_type_regexp | string | in case it is reg_exp the name of the function | xref:GenRegExp[GenRegExp] | +|================= + + +.Default Properties anchor:Properties[] +[options="header",cols="1,7"] +|================= +| Field Name | meaning +| ipv4_checksum | auto calculates checksum on this header Ipv4 type +| tcp_checksum | calculate TCP checksum +| udp_checksum | calculate UDP checksum +| ipv4_total_length | calculate ipv4 total length this pkt_size = header + reset of packet +| tlv | TLV length of the header (inlcudes the prev field length) example ip-option, tcp-option +| le | little endian. deault is big +| const | const field for example the 4 version of ipv4 header - this GUI won't give option to change this field +| external | marks the header as an external header for the GUI. 
for example IPv4 is external header and mac-addr is internal header ( compose external header) +|================= + + +.Field_Type anchor:Field_Type[] +[options="header",cols="1,^1,30,^1,^1,30"] +|================= +| Field Name | value type | meaning | Default Value | Link | Example +| class | string | describe the class type | in case class is defined no need to have name and vise versa | | class : tcp +| name | string | describe the instance name | in case class is defined no need to have name and vise versa | | name : tcp +| array_size | integer | how many objects of this type, default value is 1 | 1 | | array_size : 6 in case of mac-addr +| type | string | type, see Types define the size | "uint8_t" | xref:Types[Types] | type : "uint32_t" type : "mac_addr" +| gui_representation | dictionary | description of how to view/edit data in GUI | | xref:Gui_Representation[Gui_Representation] | xref:Gui_Representation_Example[Gui_Representation_Example] +| default | array/value | default value in the packets , you can override value for subfields in parent see example +| [0 ]x header size | | xref:Overide_Subfields_Example[Overide_Subfields_Example] +| properies | array of string like masks +| properies of this fields | [] | xref:Properties[Properties] | ["le","external"] , ["tlv","le","const"] +| value_based_next_header | dictionary | define the next protocol based on a field value | none | xref:Value_Based_Next_Header[Value_Based_Next_Header] | +| value_based_next_class | dictionary | define the next class based on a field value (useful for options) | none | xref:Value_Based_Next_Class[Value_Based_Next_Class] | +| next_headers | string or type | a name of class that define the next or just an array | "none" | xref:Next_headers[Next_headers] | +| fields | array | array of Field_Type | [] | | fields : [ ] +| offset | integer/string | offset into the packet in bits, in case of auto add base of prev fields | "auto" | | +| option | string | a java code that define a way 
to calculate varible size | "none" | | | +|================= + + +.Field_Type anchor:ConstHeadesClass[] +[options="header",cols="^1,^10"] +|================= +| Field Name | value type +| "root" | the root pointer to the start of blocks L2/802.3 etc +| "end" | end TLV headers +| "payload" | the rest of the packets as buffer/string etc +|================= + + +.Next_headers anchor:Next_headers[] +Example of Next_headers +[source,python] +---- + + - class : "next-example-t-1" + gui_representation: + help : "next-example-t-1" + next_headers : ["ipv4", "ipv6, "tcp"] + +# option 1 define in the header itself + - class : "tcp" + gui_representation: + help : "TCP header" + properies : ["external"] + next_headers : ["ipv4", "ipv6, "tcp"] + fields : + - name : "ver" + +# option 2 define throw a class + - class : "tcp" + gui_representation: + help : "TCP header" + properies : ["external"] + next_headers : "next-example-t-1" # + fields : + - name : "ver" +---- + + +.Value_Based_Next_Header anchor:Value_Based_Next_Header[] +Example of value_based_next_header +[source,python] +---- + value_based_next_header: + 0x0800: 'ipv4'# name of an external or internal class , the GUI should distinct betwean internal and external + 0x0806: 'arp' + 0x86DD: 'ipv6' + 0x8100: 'vlan' + 0x8847: 'mpls unicast' + default: 'payload' # if no match for any of above + +---- + + +.Generic RegExp Edit Field anchor:GenRegExp[] + +This will define a regexp that match for user input and how to converts it to buffer of bytes + +[source,python] +---- + +class MyClass : public RegExpBase { + public: + + + string get_reg_exp_string( ) { + return ((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})[.]((\d){1-3})) + } + + # in case of match + buffer get_buffer(){ + g= [get_group()[1].to_int()*256,get_group()[1].to_int()] + # return list + return (g) + } + +} + +---- + + + +==== Relations between object headers + +There would be a root object to point to possible starting headers + + +[source,python] +---- + +- class : "root" 
+ gui_representation: + help : "Root" + next_headers : [ "ethernet", "llc", "_802-3"] +---- + +So in a way you could define a tree like this + +[source,python] +---- + +root -> L2 ( Ethernet , 802.3 , LLC SNAP ) + |( by field ) + | + ------------------------------------- ( VLAN (with QinQ), MPLS , ipv4, ipv6, ARP , ICMP ) + | | | | + | ipv4/ipv6 - - + | | + | | + [Possibility - Ethernet/802.3/LLC SNAP) | UDP/TCP/Pyload + Object | | + for each option there tree of all the option --- - +---- + + +==== Rules + +* The size of the header and offset is automatically defined in default by the order of the fields ( inc by type size multiply by array_size) +* It can be overrided by offset field ( put offset in the object ) and then an more advanced field can be shown earlier in the GUI +* The packet size is defined before the headers. Header Should not be allowed to be added if the size + header size is bigger than packet size +* "Payload" is predefined Fields that take the reset of the packet and user can edit it ( see xref:Payload[Payload] ) +* There would be a spare field in the Stream object so GUI could add more metadata for reconstructing the builder types + for example in this example Ethrenet/IP/TCP/IP/TCP you can't extrac from buffer alone that Payload is IP/TCP only the builder known that in build time. +* Ip total length need to keep the total_pkt_size - this ip header . this should work for internal header too. 
+* When GUI add header ("external") the total size of this header should be calculated ( varible size should be given a default - ipv4) + + +=== Examples + + +==== TLV (Ip option) anchor:IpvOption[], value_based_next_class anchor:Value_Based_Next_Class[] + + +IP-option see link:http://tools.ietf.org/html/rfc791[ip_option] + +0 : END + +1 : Length 1 + +other : Byte : Length ( +first) |option + + + +[source,python] +---- + + - class : "ip_option_131" + gui_representation: + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + gui_representation: + help : "length" + type : uint8 + properties : ["tlv"] # the length include the prev field size (8 byte) + + - name : "pointer" # tree with leaf of bits + type : uint8 + + - name : "buffer" # tree with leaf of bits + type : "tlv_reset" + + - class : "default_ip4_option_tlv" + gui_representation: + help : "ip_option" + fields : + - name : "length" # tree with leaf of bits + gui_representation: + help : "length" + type : uint8 + properties : "tlv" # the length include the prev field size (8 byte) + + - name : "buffer" # tree with leaf of bits + type : "vlen_t" + + + - class : "ip_option" + gui_representation: + help : "ip_option" + type : uint8 + default : [0x01] + value_based_next_class : + 0x00 : "end" # reserve name for ending the loop + 0x01 : "ip_option" # back to this header + 0x131 : "ip_option_131" + 0x0812: "gre" + default : "default_ip4_option_tlv" + + +---- + +* case of varible length field ip_option example + + + +==== Example TCP/IP + + +[source,python] +---- + + - class : "c-mac-addr" + type : "uint8" + array_size : 6 + default : [0x00, 0x00, 0x01, 0x00, 0x00, 0x00] + gui_representation: + data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + help : "Mac addrees" + + + - class : "ethernet" + gui_representation: + help : "Ethernet-L2" + properties: ['external'] + fields : + - name : "Dst" + gui_representation: + help : "destination mac" + type : "c-mac-addr" + + - name : 
"Src" + gui_representation: + help : "source mac" + type : "c-mac-addr" + + - name: "Ethertype" + gui_representation: + help: "Ethertype" + type: "uint16" + default: [0x0800] + value_based_next_header : + 0x0800 : "ipv4" + 0x86DD : "ipv6" + 0x8100 : "vlan" + 0x8847 : "mpls" #unicast + default : "payload" + + + - class : "ipv4" + gui_representation: + help : "Ipv4" + fields : + - name : "ver" + gui_representation: + help : "Version" + type : "bit" + array_size : 4 + default : [0, 1, 0, 0] + properties : ["const"] + + - name : "ihl" + type : "bit" + array_size : 4 + default : [0, 1, 1, 1] + properties : ["ipv4_ihl"] + gui_representation: + help : "IHL" + form_type: "checkbox" + + .. + + - name : "hdr_chsum" + gui_representation: + help : "Header Checksum" + default : [0x00,0x00] + properties : ["ipv4_check_sum", "const"] + + - name : "total_len" + gui_representation: + help : "Total Length" + default : [0x00,0x00] + properties : ["ipv4_total_len", "const"] # auto calculate total_size-offset_header + + - name : "protocol" + type : uint8 + default : 0x06 + value_based_next_header : &ipv4_next_header + 0x06 : "tcp" + 0x11 : "udp" + 0x29 : "ipv6" + 0x2F : "gre" + default : "payload" + gui_representation: + help : "IPv4 next Protocol" + form_type: "combo_without_edit" + combobox_values: + <<: *ipv4_next_header # take same choices as value_based_next_header + + - name : "src_addr" + type : uint32 + default : [16, 0, 0, 0] + gui_representation: + help : "Source Address" + data_type : "ipv4" # reserve + + - name : "dst_addr" + default : [48, 0, 0, 0] + type : uint32 + gui_representation: + help : "Destination Address" + data_type : "ipv4" # reserve + form_type : "combo_with_edit" + combobox_values: + [127, 0, 0, 1]: 'localhost' + [255, 255, 255, 255]: 'broadcast' + + + - class : "tcp" + gui_representation: + help : "TCP" + properties : ["external"] + fields : + - name : "src_port" + gui_representation: + help : "Source Port" + default : [0x30,0x00] + type : uint16 + + - name 
: "dest_port" + gui_representation: + help : "Source Port" + default : [0x30,0x00] + type : uint16 + + - name : "seq" + gui_representation: + help : "Seq Number" + type : uint32 + default : [0x30,0x00,00,00] + + - name : "ack" + gui_representation: + help : "Ack Number" + type : uint32 + default : [0x30,0x00,00,00] + + ... + + - name : "flags" # tree with leaf of bits + gui_representation: + help : "Ack Number" + type : uint8 + default : [0x30] + fields : + - name : "urg" + help : "URG" + type : bit + default : [0x0] + + - name : "ack" + help : "ACK" + type : bit + default : [0x1] + .. + + - name : "checksum" + gui_representation: + help : "TCP Checksum" + type : uint16 + default : [0x00,0x00] + properties : ["tcp_checksum"] # auto calculate total_size-offset_header + + +- class : "root" # reserve + gui_representation: + help : "Root" + next_headers : [ "ethrenet" ,"llc","_802-3"] +--------------------------- + + +==== Overide subfields example anchor:Overide_Subfields_Example[] + +In this example parent class default value overrides default values of sub-fields ( 2 different mac-addr) + +[source,python] +---- + + - class : "c-mac-addr" + type : "uint8" + array_size : 6 + gui_representation: + help : "Mac addrees" + data_type : "mac-addr_t" # format ([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2} + default : [0x00,0x00,0x01,0x00,0x00,0x00] + + + - class : "ethernet" + gui_representation: + help : "Ethernet-L2" + properties : ["external"] + default : [0x00,0x01,0x01,0x00,0x00,0x00, 0x00,0x02,0x02,0x00,0x00,0x00 ,0x08,00] # change the default of sub-fields . 
it is const size + fields : + - name : "Dst" + gui_representation: + help : "destination mac" + type : "c-mac-addr" + + - name : "Src" + gui_representation: + help : "source mac" + type : "c-mac-addr" + + - name : "ip_protocol" + type : "uint16_t" + default : [0x08,0x00] + value_based_next_header : + 0x0800 : "ipv4" + 0x86DD : "ipv6" + 0x8100 : "vlan" + 0x8847 : "mpls unicast" + default : "payload" +---- + +==== Gui Representation example anchor:Gui_Representation_Example[] +[underline]#In YAML:# +[source,python] +---- + - name: 'Flags' + type: 'bit' + array_size: 3 + gui_representation: + help: 'IPv4 Flags' + form_type: 'checkbox' # can check each bit + + + - name: 'dst_addr' + default: [48, 0, 0, 0] + type: uint32 + gui_representation: + help: 'IPv4 Destination Address' + data_type: 'ipv4_t' # special representation case, show as 4 decimal numbers + form_type: 'combo_with_edit' # can choose from pre-defined values or edit manually + combobox_values: + [127, 0, 0, 1]: 'localhost' + [255, 255, 255, 255]: 'broadcast' + + + - name: 'protocol' + type: uint8 + default: 0x06 + value_based_next_header: &ipv4_next_header + 0x06: 'tcp' + 0x11: 'udp' + default : "payload" + gui_representation: + help: 'IPv4 Protocol Field' + form_type: 'combo_without_edit' # choose from supported protocols, no manual edit + combobox_values: + <<: *ipv4_next_header # take same choices as value_based_next_header +---- + +[underline]#In GUI:# + +checkbox for bits: + +image:images/checkbox.jpg[] + +editing in combo-box: + +image:images/combo_button_editing.jpg[] + +choosing from predefined values: + +image:images/combo_button_choosing.jpg[] + +==== Union base + +TBD + + + +=== Resource +* link:yaml/headers.yaml[headers.yaml] +* link:https://wireedit.com/[WireEdit] +* link:http://ostinato.org/[ostinato] +* link:http://www.slideshare.net/nlekh/ixiaexplorer[IxExplorer] + + diff --git a/yaml/headers.yaml b/yaml/headers.yaml index fd217dc6..14a94e56 100755 --- a/yaml/headers.yaml +++ 
b/yaml/headers.yaml @@ -22,7 +22,7 @@ gui_representation: help: 'Ethernet-L2' properties: ['external'] - default: [0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00] + default: [0x01, 0x01, 0x01, 0x00, 0x02, 0x02, 0x00, 0x02, 0x02, 0x00, 0x00, 0x00, 0x08, 0x00] fields: - name: 'Dst' gui_representation: @@ -42,9 +42,9 @@ default: 0x0800 value_based_next_header: 0x0800: 'ipv4' + 0x86DD: 'ipv6' # not implemented # 0x0806: 'arp' - # 0x86DD: 'ipv6' # 0x8100: 'vlan' # field of 4 bytes added in this case, what to do? # 0x8847: 'mpls unicast' # unicast or multicast? multicast is 0x8848 default: 'payload' @@ -148,8 +148,8 @@ value_based_next_header: &ipv4_next_header 0x06: 'tcp' 0x11: 'udp' + 0x29: 'ipv6' # not implemented - # 0x29: 'ipv6' # 0x2F: 'gre' default: 'payload' gui_representation: @@ -180,6 +180,67 @@ # help: 'IPv4 options' # type: 'c-ipv4-options' + + - class: 'ipv6' + gui_representation: + help: 'IPv6' + properties: ['external'] + fields: + - name: 'ver' + gui_representation: + help: 'Version' + type: 'c-bit' + array_size: 4 + default: [0, 1, 1, 0] + properties: ['const'] + + - name: 'traff_class/diff_serv' + gui_representation: + help: 'Traffic Class/Diff Serv' + type: 'bit' + array_size: 8 + default: [0, 0, 0, 0, 0, 0, 0, 0] + + - name: 'flow_label' + gui_representation: + help: 'Flow Label' + type: 'bit' + array_size: 20 + default: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] + + - name: 'payload_length' + gui_representation: + help: 'Payload Length' + type: 'uint16' + default: 40 + + - name: 'ipv6_next_header' + gui_representation: + help: 'IPv6 Next Header field' + type: 'uint8' + value_based_next_header: + 6: 'tcp' + 17: 'udp' + 41: 'ipv6' + default: 'tcp' + + - name: 'hop_limit' + gui_representation: + help: 'IPv6 hop limit' + type: 'uint8' + default: 200 + + - name: 'Src' + gui_representation: + help: 'Source IPv6' + type: 'c-ipv6-addr' + + - name: 'Dst' + gui_representation: + help: 'Destination IPv6' + 
type: 'c-ipv6-addr' + + # L4 @@ -286,7 +347,7 @@ gui_representation: help: 'Window size' type: uint16 - default: 0x0000 + default: 0x0080 - name: 'checksum' gui_representation: @@ -375,6 +436,16 @@ help: 'bit with checkbox representation' form_type: 'checkbox' + + - class: 'c-ipv6-addr' + type: 'uint16' + array_size: 8 + default: [7, 8, 7, 8, 7, 8, 7, 8] + gui_representation: + help: 'IPv6 address' + data_type: 'ipv6_t' + form_type: 'combo_with_edit' + ###################### # incomplete -- cgit 1.2.3-korg From 6313b1dd3fa5af87a7121dadf7c1c73df1eaf135 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Tue, 6 Oct 2015 14:42:45 +0300 Subject: Updated Linux OS distributions section, added change log --- trex_book.asciidoc | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 8f3e4ac5..f6f0f55c 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -9,6 +9,19 @@ TRex :local_web_server_url: csi-wiki-01:8181/trex +== Change log + +[options="header",cols="^1,^h,3a"] +|================= +| Version | name | meaning +| 1.77-0.0 | Hanoh Haim (hhaim) | +- initail versions +| 1.77.1 | Dan Klein (danklei) +| +- fixed ISO images references and supported linux supported OS + +|================= + == Introduction @@ -137,21 +150,36 @@ NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex ==== Supported versions -Fedora 18, Ubuntu 14.04.1 LTS and Fedora 20 are the Linux OS supported. +Fedora 18-20, and Ubuntu 14.04.1 LTS are the Linux OS supported. More OS could be supported by compiling the drivers. ==== Download ISO file -Download the ISO from Fedora web site from link:http://archive.fedoraproject.org/pub/fedora/linux/releases/18/Fedora/x86_64/iso/[here]. 
+The ISO images of the described Linux OS can be downloaded from the following links: + +.Supported Linux ISO image links +[options="header",cols="1,2,3^",width="50%"] +|====================================== +| # | Distribution | SHA256 Checksum +| 1.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-DVD.iso[Fedora 18] + | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-CHECKSUM[Fedora 18 CHECKSUM] +| 2.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-DVD.iso[Fedora 19] + | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] +| 3.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] + | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] +| 4.|link:http://releases.ubuntu.com/14.04.3/ubuntu-14.04.3-desktop-amd64.iso[Ubuntu 14.04] + | link:http://releases.ubuntu.com/14.04/SHA256SUMS[Ubuntu 14.04 CHECKSUM] +|====================================== + -Verify the checksum with the following command: +Then, verify the checksum of the downloaded file matches the linked checksum values with the `sha256sum` command. For example: [source,bash] ---- $sha256sum Fedora-18-x86_64-DVD.iso 91c5f0aca391acf76a047e284144f90d66d3d5f5dcd26b01f368a43236832c03 #<1> ---- -<1> Should be equal to this number. +<1> Should be equal to the sha256 values described in the linked CHECKSUM files. 
==== Install Linux -- cgit 1.2.3-korg From 4c1b6775eef57dd9f1264cd95925050be813511b Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Tue, 6 Oct 2015 14:48:31 +0300 Subject: Updated Ubuntu OS links reference --- trex_book.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index f6f0f55c..df78de04 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -167,8 +167,8 @@ The ISO images of the described Linux OS can be downloaded from the following li | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] | 3.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] -| 4.|link:http://releases.ubuntu.com/14.04.3/ubuntu-14.04.3-desktop-amd64.iso[Ubuntu 14.04] - | link:http://releases.ubuntu.com/14.04/SHA256SUMS[Ubuntu 14.04 CHECKSUM] +| 4.|link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.01] + | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] |====================================== -- cgit 1.2.3-korg From 8f5d21f847a1fe442babfd12230feafe059504f4 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 12 Oct 2015 16:38:33 +0300 Subject: 64bit kernels are supported --- trex_book.asciidoc | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index df78de04..5e544786 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -150,8 +150,21 @@ NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex ==== Supported versions -Fedora 18-20, and Ubuntu 14.04.1 LTS are the Linux OS supported. -More OS could be supported by compiling the drivers. 
+Fedora 18-20 , and Ubuntu 14.04.1 LTS are the Linux OS supported. +You should install the *64bit* Kernel version. +More 64bit OS could be supported by compiling the drivers. + +WARNING: Only *64bit* Kernels are supported + +To verify that your kernel is 64bit version try this + +[source,bash] +---- +$uname -m +x86_64 #<1> +---- +<1> x86_64 is the desired output + ==== Download ISO file -- cgit 1.2.3-korg From c991b001f18f7d27fce5457c5d1ba5ea508b63c2 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Tue, 13 Oct 2015 06:58:16 +0300 Subject: fixed link to fedora 20 error --- trex_book.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index df78de04..9c3c0fa9 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -165,7 +165,7 @@ The ISO images of the described Linux OS can be downloaded from the following li | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-CHECKSUM[Fedora 18 CHECKSUM] | 2.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-DVD.iso[Fedora 19] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] -| 3.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] +| 3.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] | 4.|link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.01] | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] -- cgit 1.2.3-korg From 551884d15dc7d3ab71bce168201c742aaf7396ef Mon Sep 17 00:00:00 
2001 From: Hanoh Haim Date: Thu, 15 Oct 2015 12:04:27 +0300 Subject: v1.78 --- release_notes.asciidoc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index b4496213..7f2191e4 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,19 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.78 == + +* some clean up in tuple generator + +=== fix issues: === + +Python API fixup see here +* link:http://csi-wiki-01:8090/issue/trex-126[trex-126] +* link:http://csi-wiki-01:8090/issue/trex-123[trex-122] + +Check for 64bit Kernel +* link:http://csi-wiki-01:8090/issue/trex-123[trex-123] + == Release 1.77 == -- cgit 1.2.3-korg From 1176d62bb2c9121af27d40650fce6bd819884064 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 15 Oct 2015 12:06:40 +0300 Subject: v1.78-1 --- release_notes.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 7f2191e4..fa268869 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,6 +23,7 @@ endif::backend-docbook[] == Release 1.78 == * some clean up in tuple generator +* trex stateles console works with trex-mock === fix issues: === -- cgit 1.2.3-korg From e82673cfb45ed8d28fa9414f7f3e4a87a0e36a68 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 15 Oct 2015 12:12:20 +0300 Subject: v1.78-2 --- release_notes.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index fa268869..1dfea31b 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -28,12 +28,13 @@ endif::backend-docbook[] === fix issues: === Python API fixup see here + * link:http://csi-wiki-01:8090/issue/trex-126[trex-126] * link:http://csi-wiki-01:8090/issue/trex-123[trex-122] Check for 64bit Kernel -* link:http://csi-wiki-01:8090/issue/trex-123[trex-123] +* link:http://csi-wiki-01:8090/issue/trex-123[trex-123] == Release 1.77 == 
-- cgit 1.2.3-korg From 24673005967ef97fccd546acb40fde7ba9402141 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Thu, 22 Oct 2015 09:08:44 +0300 Subject: Updated RPC server document and annd visio drawings of phase 2.0 stateless. --- trex_rpc_server_spec.asciidoc | 11 ++++++++++- visio_drawings/trex_2.0_stateless.png | Bin 0 -> 1081192 bytes visio_drawings/trex_2.0_stateless.vsd | Bin 0 -> 935424 bytes 3 files changed, 10 insertions(+), 1 deletion(-) mode change 100644 => 100755 trex_rpc_server_spec.asciidoc create mode 100755 visio_drawings/trex_2.0_stateless.png create mode 100755 visio_drawings/trex_2.0_stateless.vsd diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc old mode 100644 new mode 100755 index e973aedc..c994b2d1 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -19,6 +19,10 @@ The TRex RPC Server | - added usage examples using Python code as Higher-level usage - added logic and explanation behind VM commands +| 1.1 | Dan Klein (danklei) +| +- Fixed some consistency issues +- added RPC interaction examples appendix |================= @@ -659,7 +663,7 @@ please also consider the following constraints: ==== The bytes needed for activating 'rx_stats': -* *rx_stream_id* consumes 2 bytes +* *stream_id* consumes 2 bytes * *seq_enabled* consumes 4 bytes * *latency_enabled* consumes 4 bytes @@ -677,6 +681,8 @@ if both are enabled then 10 bytes will be used. |================= | Field | Type | Description | enabled | boolean | is rx_stats enabled for this stream +| stream_id | int | stream_id for which to collect rx_stats. + +This could be stream_id different from the stream object which contains the rx_stats object. | seq_enabled | boolean | should write 32 bit sequence | latency_enabled | boolean | should write 32 bit latency |================= @@ -730,6 +736,9 @@ if both are enabled then 10 bytes will be used. ---- +This request-reply sequence demonstrate a method in which rx_stats are diabled. 
+In case rx_stats feature is enabled, rx_object **must include** all rx_stats object fields as described above. + === Remove Stream * *Name* - 'remove_stream' diff --git a/visio_drawings/trex_2.0_stateless.png b/visio_drawings/trex_2.0_stateless.png new file mode 100755 index 00000000..01787f99 Binary files /dev/null and b/visio_drawings/trex_2.0_stateless.png differ diff --git a/visio_drawings/trex_2.0_stateless.vsd b/visio_drawings/trex_2.0_stateless.vsd new file mode 100755 index 00000000..d46f2a59 Binary files /dev/null and b/visio_drawings/trex_2.0_stateless.vsd differ -- cgit 1.2.3-korg From e91c3198269b84ca0dbab2004729e5826b8c7436 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Sun, 25 Oct 2015 07:53:46 +0200 Subject: Added RPC examples for add_stream method --- trex_rpc_server_spec.asciidoc | 217 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 216 insertions(+), 1 deletion(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index c994b2d1..b914a0ab 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1299,4 +1299,219 @@ Finally, let's see the expected JSON output of the VM instructions: <5> `fix_checksum_ipv4` instruction for both ranging options [NOTE] -In this case only one checksum instruction has been generated, since both ranging options applies to the same IP header. \ No newline at end of file +In this case only one checksum instruction has been generated, since both ranging options applies to the same IP header. + + +:numbered!: + +[appendix] +Interaction Examples +-------------------- + +This appendix brings examples with data for the this RPC interaction. + + + +<<_add_stream, add_stream>> method example +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following example represents an interaction between the RPC client and the server's response. 
+ +Simple single packet client request +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +On the following example, there's no VM instructions, rx_stats option is disabled and there's only a single packet which isn't connected to any other packet. + +[underline]#Client request# +[source, bash] +---- +{ + "id" : "2bqgd2r4", + "jsonrpc" : "2.0", + "method" : "add_stream", + "params" : { + "handler" : "37JncCHr", + "port_id" : 1, + "stream" : { + "enabled" : true, + "isg" : 0, + "mode" : { + "pps" : 100, + "type" : "continuous" + }, + "next_stream_id" : -1, + "packet" : { + "binary" : [ + 0, + 80, + 86, + 128, + 13, + ... # more packet data + 77, + 79, + 250, + 154, + 66 + ], + "meta" : "" + }, + "rx_stats" : { + "enabled" : false + }, + "self_start" : true, + "vm" : [] + }, + "stream_id" : 0 + } +} + +---- + +[underline]#Server's response# +[source, bash] +---- +{ + "id" : "2bqgd2r4", + "jsonrpc" : "2.0", + "result" : "ACK" +} + +---- + + +Two linked packets with VM instructions client request +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +On the following example, a **batch request** is being issued to the server, containing two `add_stream` requests. + +[underline]#First request# + +The first client request is similar to the previous example. + +However, in this case the rx_stats object is enbaled and set to monitor ancestor's `stream_id` (which is 0 in this case). + +Ontop, this stream points to the next stream as the one to follow, as described under `next_stream_id` of `stream` object. + +[underline]#Second request# + +In this stream the big difference is that it has VM instructions under the `vm` field of the `stream` object. + +Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `stream` object is set to '-1'. 
+ + +[underline]#Client request# + +[source, bash] +---- +[ + { + "id" : "tq49f6uj", + "jsonrpc" : "2.0", + "method" : "add_stream", + "params" : { + "handler" : "2JjzhMai", + "port_id" : 3, + "stream" : { + "enabled" : true, + "isg" : 0, + "mode" : { + "pps" : 100, + "type" : "continuous" + }, + "next_stream_id" : 1, + "packet" : { + "binary" : [ + 0, + 80, + 86, + ... # more packet data + 250, + 154, + 66 + ], + "meta" : "" + }, + "rx_stats" : { + "enabled" : true, + "latency_enabled" : false, + "seq_enabled" : false, + "stream_id" : 0 + }, + "self_start" : true, + "vm" : [] + }, + "stream_id" : 0 + } + }, + { + "id" : "2m7i5olx", + "jsonrpc" : "2.0", + "method" : "add_stream", + "params" : { + "handler" : "2JjzhMai", + "port_id" : 3, + "stream" : { + "enabled" : true, + "isg" : 0, + "mode" : { + "pps" : 200, + "type" : "continuous" + }, + "next_stream_id" : -1, + "packet" : { + "binary" : [ + 0, + 80, + 86, + 128, + ... # more packet data + 216, + 148, + 25 + ], + "meta" : "" + }, + "rx_stats" : { + "enabled" : false + }, + "self_start" : false, + "vm" : [ + { + "init_value" : "65537", + "max_value" : "65551", + "min_value" : "65537", + "name" : "l3__src", + "op" : "inc", + "size" : 4, + "type" : "flow_var" + }, + { + "add_value" : 1, + "is_big_endian" : false, + "name" : "l3__src", + "pkt_offset" : 34, + "type" : "write_flow_var" + } + ] + }, + "stream_id" : 1 + } + } +] + +---- + +[underline]#Server's response# +[source, bash] +---- +[ + { + "id" : "tq49f6uj", + "jsonrpc" : "2.0", + "result" : "ACK" + }, + { + "id" : "2m7i5olx", + "jsonrpc" : "2.0", + "result" : "ACK" + } +] + +---- \ No newline at end of file -- cgit 1.2.3-korg From 4f20acd25e9b818919858df5b8c7a2a1527caa9a Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Sun, 25 Oct 2015 09:05:12 +0200 Subject: Added updated RPC arch image, updated authors --- images/rpc_server_big_picture.png | Bin 54566 -> 1081192 bytes images/rpc_server_big_picture_old.png | Bin 0 -> 54566 bytes trex_rpc_server_spec.asciidoc 
| 6 +++--- 3 files changed, 3 insertions(+), 3 deletions(-) mode change 100644 => 100755 images/rpc_server_big_picture.png create mode 100644 images/rpc_server_big_picture_old.png diff --git a/images/rpc_server_big_picture.png b/images/rpc_server_big_picture.png old mode 100644 new mode 100755 index dae6976d..01787f99 Binary files a/images/rpc_server_big_picture.png and b/images/rpc_server_big_picture.png differ diff --git a/images/rpc_server_big_picture_old.png b/images/rpc_server_big_picture_old.png new file mode 100644 index 00000000..dae6976d Binary files /dev/null and b/images/rpc_server_big_picture_old.png differ diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index b914a0ab..32007b3f 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1,8 +1,8 @@ The TRex RPC Server =================== -:author: Itay Marom -:email: -:revnumber: 1.01 +:Author: Itay Marom, Dan Klein +:email: trex-dev@cisco.com +:revnumber: 1.1 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex -- cgit 1.2.3-korg From 5c04563aae8893c4e55c8a6e494d961ea8410215 Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Mon, 26 Oct 2015 08:21:11 +0200 Subject: Updates to ws scripts- add build_cp_docs rule in more robust way --- wscript | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/wscript b/wscript index 3302ec58..31b5c9c1 100755 --- a/wscript +++ b/wscript @@ -87,6 +87,7 @@ def options(opt): def configure(conf): conf.find_program('asciidoc', path='/usr/local/bin/', var='ASCIIDOC') + conf.find_program('sphinx-build', path_list='/usr/local/bin/', var='SPHINX') pass; def convert_to_pdf(task): @@ -119,17 +120,22 @@ def do_visio(bld): for x in bld.path.ant_glob('visio\\*.vsd'): tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) -def build_cp_docs (trex_src_dir, dest_dir = "_build", builder = "html"): - build_doc_cmd = 
shlex.split("/usr/local/bin/sphinx-build -b {bld} {src} {dst}".format( - bld= builder, - src= ".", - dst= dest_dir) +#def build_cp_docs (trex_src_dir, dest_dir = "_build", builder = "html"): +def build_cp_docs (task): + out_dir = task.outputs[0].abspath() + export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. + trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) ) - bld_path = os.path.abspath( os.path.join(trex_src_dir, 'scripts', 'automation', 'trex_control_plane', 'doc') ) - ret_val = subprocess.call(build_doc_cmd, cwd = bld_path) - if ret_val: - raise RuntimeError("Build operation of control plain docs failed with return value {ret}".format(ret= ret_val)) - return + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) + return (1) def build(bld): @@ -203,11 +209,20 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) +# bld(rule=build_cp_docs, +# source='1.txt', target='cp_docs', scan=ascii_doc_scan) + # generate control plane documentation export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists the desired ENV variable. 
- build_cp_docs(trex_core_git_path, dest_dir= export_path) + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. + trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc', 'index.rst') + bld(rule=build_cp_docs, +# source = '1.txt',#trex_core_docs_path, + target = 'cp_docs') + # build_cp_docs(trex_core_git_path, dest_dir= export_path) else: raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") -- cgit 1.2.3-korg From 80d312fdc044d289fda20767beb5df9c82396b9a Mon Sep 17 00:00:00 2001 From: Dan Klein Date: Tue, 27 Oct 2015 01:50:02 +0200 Subject: updated path_list of asciidoc program --- wscript | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wscript b/wscript index 31b5c9c1..3c7f5e90 100755 --- a/wscript +++ b/wscript @@ -86,7 +86,7 @@ def options(opt): opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled') def configure(conf): - conf.find_program('asciidoc', path='/usr/local/bin/', var='ASCIIDOC') + conf.find_program('asciidoc', path_list='/usr/bin/', var='ASCIIDOC') conf.find_program('sphinx-build', path_list='/usr/local/bin/', var='SPHINX') pass; -- cgit 1.2.3-korg From c63cb853e8cc626cae04c49ff63cddc42364648e Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 8 Nov 2015 12:38:26 +0200 Subject: add trex_console proposal --- trex_console.asciidoc | 517 ++++++++++++++++++++++++++++++++++++++++++++++++++ wscript | 3 + 2 files changed, 520 insertions(+) create mode 100644 trex_console.asciidoc diff --git a/trex_console.asciidoc b/trex_console.asciidoc new file mode 100644 index 00000000..467ff034 --- /dev/null +++ b/trex_console.asciidoc @@ -0,0 +1,517 @@ +TRex console - commands proposal +================================= 
+:author: Hanoch Haim +:email: +:revnumber: 0.1 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex + + +== Console + +=== Overview + +The console will use TRex Client API for controling TRex +Some guidelines: + +* Console should not save state - it should sync with server to get the state - in case of crash/exit in Console it could sync at startup to the server +* Let's assume users acquire all ports - for simplicity +* Commands will be like bash shell commands - no order, many flags +* Ability to show stats in real time. gives the option to open two Console one for stats and one for commands + +=== Ports State + +[options="header",cols="^1,3a"] +|================= +| state | meaning +| IDLE | no streams, does not work +| STREAMS | with streams, does not work +| WORK | with streams, works +| PAUSE | with streams, pause +|================= + + +[source,bash] +---- + + IDLE -> (add streams) -> STREAMS (start) -> WORK (stop) -> STREAMS (start) + | WORK (pause) -> PAUSE (resume )--- + | | + | | + ------------------------------------ + +----- + +=== Commands + +==== Connect + +[source,bash] +---- + +$trex-con [--port $PORT] [--ip $IP] [--async_port port] + --port change the default server - default 4505 for async ZMQ) + --async_port for sub/pub ZMQ - default 4506 + --ip default 127.0.0.1 +---- + +This command +* try to connect to server +* send ping command +* get all the ports info / streams info +* read counters stats for a ref + + +==== reset + +Reset the server and client to a known state - should not be used in a normal scenario + +[source,bash] +---- +$reset +---- + +- force acuire all the ports +- Stop all traffic on all the ports +- Remove all the streams from all the ports + + +==== port + +Configure port state, autoneg, rate etc + +[source,bash] +---- +$port --port 1 --cfg "auto/10/" + + -port [id] + --cfg string with the configuration name + +---- + + +==== clear + +Clear all port stats 
counters + +[source,bash] +---- +$clear +---- + + + +==== stats + +Shows global and port statistic + +[source,bash] +---- +$stats [-g] [-p] [-ps] [--port mask ] + + -g show only global stats + -p only ports stats + -ps only port status (type/driver/link-up/down/negotion type etc) + --port mask on the port for example --port 2 3 will show only port 2,3 + +---- + +Examples + + +[source,bash] +---- +$stats -g + +Connected : 127.0.0.1 4500 +Version : 1.78 UUID : 12121212 +CPU : 12.0 %% +Total TX : 20.2 Gb/sec +Total Rx : 20.2 Gb/sec +Total PPS : 100MPPS +Total Streams : 10 +Active ports : 4 +---- + +[source,bash] +---- +$stats -p + + port 0 1 2 3 + ------------------------------------ + owner my my my my - place holder no need to implement as we takes all port avali + active on on off off + tx-bytes 12131 0 0 0 + rx-bytes 0 0 0 0 + tx-pkts 0 0 0 0 + rx-pkts 0 0 0 0 + tx-errors 0 0 0 0 + rx-errors 0 0 0 0 + Tx-Bw 12gb 1.3Gb 0 0 + Rx-Bw 10mb 11.2mb 0 0 +---- + +In case of more than four ports should show only the first ports or by mask ( --port mask) + + +[source,bash] +---- +$stats -ps + + --- port status + port 0 1 2 3 + ------------------------------------ + port-type I350 I350 I350 I350 + maximum 1Gb 1Gb 1Gb !gb + link on on off off +---- + + +==== streams + +Shows the configured streams on each port/ports +Should show from client cache + +[source,bash] +---- +$streams [-port mask] [-port 0xff] [--streams mask] [-f] [--full] [--graph] + + --port mask, e.g --port 1 2 3 4 + --streams mask e.g. 
--streams 1 2 + -f /--full print stream info in a JSON format with all the information + --graph : add the graph in time of each port stream +---- + + +example + +[source,bash] +---- +$streams + +port 0 : imix/a.yaml + + stream id , packet type , length , mode , rate , next + + 0 , ip/tcp , 64 , continues , 100KPPS , none + + 1 , ip/udp , 128 , burst , 200KPPS , none + + 2 , ip/udp , 1500 , multi-burst , 100KPPS , none + + + +port 1 : imix/a.yaml + + + 0 , ip/tcp , 64 , continues , 100KPPS , none + + 1 , ip/udp , 128 , burst , 200KPPS , none + + 2 , ip/udp , 1500 , multi-burst , 100KPPS , none + +---- + + +show only port 1 and 2 + +[source,bash] +---- +$streams --port 1 2 + + .. + .. +---- + +[source,bash] +---- +$streams --port 0 --streams 0 -f + + + show the full info on stream 0 and port 0, print in JSON format + +---- + + + + +==== start + +* work on a set of ports +* remove all streams +* load new streams +* start traffic with specific multiplier +* limit the traffic to a specific duration +* port state should be stopped, in case of --force stop the port +* in case one of the port is not stop don't start any port + +[source,bash] +---- +$start [--force] [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-f stl/imix.yaml] [-db ab] [-d 100] [-d 10m] [-d 1h] [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] + + + port mask : + [-a] : all ports + [-port 1 2 3] : port 1,2 3 + [-port 0xff] : port by mask + [-port clients/servers] : clients side or server side + + + + stream to load: + -f stl/imix.yaml : load from local disk the streams file + --db stream that was loaded to db + + + + duration: + -d 100 : in sec + -d 10m : in min + -d 1h : in hours + + in case of no duration, it will stop only if stream should stop + + + multiplier : + + -m 100 : multiply stream file by this factor + -m 10gb : from graph calculate the maximum rate as this bandwidth ( for each port ) + -m 10kpps : from graph calculate the maximum rate as this pps ( for each port ) + -m 40% : from graph 
calculate the maximum rate as this precent from total port ( for each port ) + + + force: + --force stop ports if they are active + +---- + +examples + + +[source,bash] +---- +$start -a -f stl/imix.yaml -m 10gb +---- +start this profile on all all ports maximum bandwidth is 10gb + + +[source,bash] +---- +$start -port 1 2 -f stl/imix.yaml -m 100 +---- +start this profile on port 1,2 multiply by 100 + + +[NOTE] +===================================== + in case of start command without args, try to remember the last args given and reprint them +===================================== + +==== stop + +* work on a set of ports +* change the mode of the port to stopped +* do not remove the streams +* in case port state is already stopped don't do anything + +[source,bash] +---- +$stop [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] + + See ports command explanation from the start + +---- + + +==== pause + +* work on a set of ports +* move a wokring set of ports to a state of pause + + +[source,bash] +---- +$pause [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] + + see ports command explanation from start + +---- + + +==== resume + +* work on a set of ports +* move a wokring set of port to a state of resume + + +[source,bash] +---- +$resume [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] + + see ports command explanation from start + +---- + + +==== restart + +* restart the work on the loaded streams +* same as start without the -f /--db switch + +[source,bash] +---- +$restart [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-d duration] [-m multiplier] + + see ports command explanation from start + +---- + +==== update + + +[source,bash] +---- +>update [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] +---- +Update the bandwidth multiplier for a mask of ports + + +[NOTE] +===================================== + Here we could add the ability to disable/enable specific stream, load new stream 
dynamically etc. +===================================== + + +==== async events queue + +there are two ways to know if somthing async happned + +* pool the state +* get async event + +example for events are: + +* link is up/down +* port id stoped +* port id start +* errors +* info + + +[source,bash] +---- +$clear_events +---- + +clear events queue + +[source,bash] +---- +$show_events +---- +show a list of events from the queue + +[source,bash] +---- +$remove --event [event-id] --top + --event : remove the event-id from the list + --top : remove the even from the top +---- + + +[source,bash] +---- +$wait_for_event [event-id] +---- +wait only in script mode, simple way to wait for event like all port stopped + + +==== stream database commands + +* load/remove/show streams from memory + + +[source,bash] +---- +$db_load -f [stream ] -name [name] +---- + +[source,bash] +---- +$db_remove -name [name] +---- + +[source,bash] +---- +$db_show [--all] [--name $name] [--full] +---- + + +==== script + +[source,bash] +---- +$script -f script_name +----- + +run script of commands + + +==== tui + +shows the stats in a textual window (like top) + +[source,bash] +---- +$tui +---- + +enter to a mode of Stats and present 3 type of windows +* global/port stats/version/connected etc +* per port +* per port streams info + + +get keyboard + q - quit the gui window + c - clear all counters + + +=== Priorty + +* logger - JSON-RPC into a file ( req/res) +* start/stop/stats/tui/streams/restart/reset +* db +* port +* events +* pause/resume/restart + + +=== More ideas + +* define a YAML format that has ports inside so in away load each YAML to each port +* add ability to load range of ip/mac program in YAML file + fields : + name : ipv4.src + offset : 12 + range : + min_ip : 10.0.0.1/ipv6 addr + max_ip : 10.0.0.20 + inc : 1 + dec : 1 + start : 10.0.0.4 + + name : ipv4.dest + offset : 45 + range : + min_ip : 10.0.0.1 + max_ip : 10.0.0.2 + + +=== Change log + +[options="header",cols="^1,^h,3a"] 
+|================= +| Version | name | meaning +| 1.00 | Hanoch Haim (hhaim) | +- first version +|================= + + + diff --git a/wscript b/wscript index 3302ec58..c7111c60 100755 --- a/wscript +++ b/wscript @@ -203,6 +203,9 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_console.asciidoc waf.css', target='trex_console.html', scan=ascii_doc_scan) + # generate control plane documentation export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') trex_core_git_path = os.getenv('TREX_CORE_GIT', None) -- cgit 1.2.3-korg From eff0278230118e380db0df860bf47652826b6042 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 9 Nov 2015 16:20:36 +0200 Subject: trex console minors --- trex_console.asciidoc | 154 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 98 insertions(+), 56 deletions(-) diff --git a/trex_console.asciidoc b/trex_console.asciidoc index 467ff034..18f3441d 100644 --- a/trex_console.asciidoc +++ b/trex_console.asciidoc @@ -7,7 +7,7 @@ TRex console - commands proposal :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex - +:toc::[levels=3] == Console @@ -16,10 +16,13 @@ TRex console - commands proposal The console will use TRex Client API for controling TRex Some guidelines: -* Console should not save state - it should sync with server to get the state - in case of crash/exit in Console it could sync at startup to the server -* Let's assume users acquire all ports - for simplicity -* Commands will be like bash shell commands - no order, many flags -* Ability to show stats in real time. 
gives the option to open two Console one for stats and one for commands +* Console should not save it own state, it should only cache server state. It assumed there is only one console that has R/W capability so once connected as R/W console (per user/port) it could read the server state and then cache all the operations. +* There could be many read-only clients for same user same ports. The ability to enforce it does not yet exits in the server (will be done). +* Console should sync with server to get the state in connection time and cache the server information locally once the state was changed +* In case of crash/exit of the Console it should sync again at startup +* Let's assume Console acquire all ports - for simplicity, for now. +* Commands will be like bash shell - no order args, many flags +* Ability to show stats in real time. gives the option to open two Console one for stats and one for commands ( many readonly clients) === Ports State @@ -44,25 +47,78 @@ Some guidelines: ----- +=== Common Arguments + +This section includes arguments that are common to many commands +In the command they will be marked like this (arg name) + +==== Port mask + +this gives the ability to choose batch of ports + +[source,bash] +---- +$command [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] + + port mask : + [-a] : all ports + [-port 1 2 3] : port 1,2 3 + [-port 0xff] : port by mask 0x1 for port 0 0x3 for port 0 and 1 + [-port clients/servers] : -port clients will choose all the client side ports +---- + +==== Duration + +duration in second or in min or hours + +[source,bash] +---- +$command[-d 100] [-d 10m] [-d 1h] + + duration: + -d 100 : in sec + -d 10m : in min + -d 1h : in hours +---- + + +==== Multiplier + +[source,bash] +---- +$command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] + + multiplier : + + -m 100 : multiply stream file by this factor + -m 10gb : from graph calculate the maximum rate as this bandwidth ( for each port ) + -m 10kpps : from graph calculate 
the maximum rate as this pps ( for each port ) + -m 40% : from graph calculate the maximum rate as this precent from total port ( for each port ) +---- + + === Commands + ==== Connect [source,bash] ---- -$trex-con [--port $PORT] [--ip $IP] [--async_port port] - --port change the default server - default 4505 for async ZMQ) - --async_port for sub/pub ZMQ - default 4506 - --ip default 127.0.0.1 +$trex-con [--ip $IP] [--server $IP] [--rpc-port $PORT] [--async_port port] + + --rpc-port : change the default server - default 5505 for RPC + + --async_port : for sub/pub ZMQ - default 4505 + + --ip or --server :default 127.0.0.1 the TRex server ip ---- This command * try to connect to server * send ping command -* get all the ports info / streams info -* read counters stats for a ref - +* sync with all the ports info / streams info state +* read all counters stats for reference ==== reset @@ -84,9 +140,8 @@ Configure port state, autoneg, rate etc [source,bash] ---- -$port --port 1 --cfg "auto/10/" +$port (port mask) --cfg "auto/10/" - -port [id] --cfg string with the configuration name ---- @@ -98,23 +153,21 @@ Clear all port stats counters [source,bash] ---- -$clear +$clear (port mask) ---- - ==== stats Shows global and port statistic [source,bash] ---- -$stats [-g] [-p] [-ps] [--port mask ] +$stats (port mask) [-g] [-p] [-ps] -g show only global stats -p only ports stats -ps only port status (type/driver/link-up/down/negotion type etc) - --port mask on the port for example --port 2 3 will show only port 2,3 ---- @@ -176,7 +229,7 @@ Should show from client cache [source,bash] ---- -$streams [-port mask] [-port 0xff] [--streams mask] [-f] [--full] [--graph] +$streams (port mask) [--streams mask] [-f] [--full] [--graph] --port mask, e.g --port 1 2 3 4 --streams mask e.g. 
--streams 1 2 @@ -240,42 +293,17 @@ $streams --port 0 --streams 0 -f * limit the traffic to a specific duration * port state should be stopped, in case of --force stop the port * in case one of the port is not stop don't start any port +* all ports should be in state IDLE or STREAMS [source,bash] ---- -$start [--force] [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-f stl/imix.yaml] [-db ab] [-d 100] [-d 10m] [-d 1h] [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] - - - port mask : - [-a] : all ports - [-port 1 2 3] : port 1,2 3 - [-port 0xff] : port by mask - [-port clients/servers] : clients side or server side - +$start [--force] (port mask) [-f stl/imix.yaml] [-db ab] (duration) (multiplier) stream to load: -f stl/imix.yaml : load from local disk the streams file --db stream that was loaded to db - - - - duration: - -d 100 : in sec - -d 10m : in min - -d 1h : in hours - - in case of no duration, it will stop only if stream should stop - - multiplier : - - -m 100 : multiply stream file by this factor - -m 10gb : from graph calculate the maximum rate as this bandwidth ( for each port ) - -m 10kpps : from graph calculate the maximum rate as this pps ( for each port ) - -m 40% : from graph calculate the maximum rate as this precent from total port ( for each port ) - - force: --force stop ports if they are active @@ -309,10 +337,12 @@ start this profile on port 1,2 multiply by 100 * change the mode of the port to stopped * do not remove the streams * in case port state is already stopped don't do anything +* all ports should be in state WORK + [source,bash] ---- -$stop [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] +$stop (port mask) See ports command explanation from the start @@ -323,11 +353,13 @@ $stop [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] * work on a set of ports * move a wokring set of ports to a state of pause +* all ports should be in state WORK + [source,bash] ---- -$pause [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] 
+$pause (port mask) see ports command explanation from start @@ -338,11 +370,13 @@ $pause [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] * work on a set of ports * move a wokring set of port to a state of resume +* all ports should be in state PAUSE + [source,bash] ---- -$resume [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] +$resume (port mask) see ports command explanation from start @@ -353,10 +387,11 @@ $resume [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] * restart the work on the loaded streams * same as start without the -f /--db switch +* all ports should be in state STREAMS [source,bash] ---- -$restart [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-d duration] [-m multiplier] +$restart (port mask) (duration) (multiplier) see ports command explanation from start @@ -364,10 +399,12 @@ $restart [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-d duration] ==== update +* all ports should be in state WORK + [source,bash] ---- ->update [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] +>update (port mask) (multiplier) ---- Update the bandwidth multiplier for a mask of ports @@ -475,17 +512,20 @@ get keyboard === Priorty -* logger - JSON-RPC into a file ( req/res) -* start/stop/stats/tui/streams/restart/reset +* Console logger - JSON-RPC into a file ( req/res) +* start/stop/stats/tui/streams/reset * db * port * events -* pause/resume/restart - +* pause/resume/restart/restart +* scripts +* move all the debug commands to be dbg_xxx +* implement advance -m ( by reading graphs) +* Enforcement of one user/port with R/W capability === More ideas -* define a YAML format that has ports inside so in away load each YAML to each port +* define a YAML format that include stream per port inside so in away load each YAML to each port * add ability to load range of ip/mac program in YAML file fields : name : ipv4.src @@ -511,6 +551,8 @@ get keyboard | Version | name | meaning | 1.00 | 
Hanoch Haim (hhaim) | - first version +| 1.01 | Hanoch Haim (hhaim) | +- Incorporate Itay comments |================= -- cgit 1.2.3-korg From e5fd1d6d2004654a7928de359cbeda615724ecaa Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 9 Nov 2015 17:20:20 +0200 Subject: trex cons --- trex_console.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_console.asciidoc b/trex_console.asciidoc index 18f3441d..12999f04 100644 --- a/trex_console.asciidoc +++ b/trex_console.asciidoc @@ -7,7 +7,7 @@ TRex console - commands proposal :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex -:toc::[levels=3] +:toclevels: 4 == Console -- cgit 1.2.3-korg From ef5cbdbb4ec5e2075f968a56b2da893b230cc98a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 13 Nov 2015 00:39:47 +0200 Subject: v1.79 --- release_notes.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 1dfea31b..dd236105 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,14 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.79 == + +* initial support for stateless +** only continues streams +** No VM support +** more info how to enable the interactive shell link:trex_console.html + + == Release 1.78 == * some clean up in tuple generator -- cgit 1.2.3-korg From a022f5095f069ff3f0c7a30c841c420e01c4d420 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 13 Nov 2015 00:48:48 +0200 Subject: v1.79 update console doc --- trex_console.asciidoc | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/trex_console.asciidoc b/trex_console.asciidoc index 12999f04..074130e6 100644 --- a/trex_console.asciidoc +++ b/trex_console.asciidoc @@ -47,6 +47,46 @@ Some guidelines: ----- +=== Tutorial + +First run trex in interactive mode + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- + +on the same machine from a 
different window connect to to trex + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- +$./trex-console +---- + + +from console you can run this + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- + +# will start traffic on all port +>start -a -m 1 -f stl/imix_1pkt.yaml +# stop traffic on all port +>stop -a + +# show dynamic statistic +>tui + +#stop all and remove all stats +>reset + +---- + + === Common Arguments This section includes arguments that are common to many commands -- cgit 1.2.3-korg From 2e70d03aa7e9bff245f1da2d4d0012da95f4c7fb Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 22 Nov 2015 11:09:24 -0500 Subject: NUMA example with 40Gb/s NICs --- images/different_numa.png | Bin 0 -> 31915 bytes images/same_numa.png | Bin 0 -> 21704 bytes trex_book.asciidoc | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100755 images/different_numa.png create mode 100755 images/same_numa.png diff --git a/images/different_numa.png b/images/different_numa.png new file mode 100755 index 00000000..a8be8a9e Binary files /dev/null and b/images/different_numa.png differ diff --git a/images/same_numa.png b/images/same_numa.png new file mode 100755 index 00000000..a9a0466e Binary files /dev/null and b/images/same_numa.png differ diff --git a/trex_book.asciidoc b/trex_book.asciidoc index c8bfa609..04fcd718 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -124,7 +124,14 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw | E1000 | paravirtualize | vmWare/KVM/VirtualBox |================= -IMPORTANT: Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. +[IMPORTANT] +===================================== +* Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. 
+* Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. +* One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) +* NUMAs-CPUs relation is determined with following command: lscpu +* See real example of NUMA usage xref:numa-example[here] +===================================== .Sample order for UCSC-C220-M3S with 4x10Gb ports [options="header",cols="2,1^",width="50%"] @@ -1106,7 +1113,32 @@ a configuration file now has the folowing struct to support multi instance <8> Socket of the dual interfaces, in this example of 03:00.0 and 03:00.1, memory should be local to the interface <9> Thread to be used, should be local to the NIC +*Real example:* anchor:numa-example[] + +We've connected 2 Intel XL710 NICs close to each other on motherboard, they shared same NUMA: + +image:images/same_numa.png[title="2_NICSs_same_NUMA"] + +The CPU utilization was very high ~100%, with c=2 and c=4 the results were same. +Then, we moved the cards to different NUMAs: + +image:images/different_numa.png[title="2_NICSs_different_NUMAs"] + +*+* +We needed to add configuration to the /etc/trex_cfg.yaml: + +[source,python] + platform : + master_thread_id : 0 + latency_thread_id : 8 + dual_if : + - socket : 0 + threads : [1, 2, 3, 4, 5, 6, 7] + - socket : 1 + threads : [9, 10, 11, 12, 13, 14, 15] + +This gave best results, and CPU utilization with c=7 at *\~98 Gb/s* TX BW became *~21%*! 
(40% with c=4) === Command line options anchor:cml-line[] -- cgit 1.2.3-korg From 72a094f2595e5d1ed3afb19e60231d3fcd73dc30 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 22 Nov 2015 13:17:34 -0500 Subject: grep Network -> grep Ethernet, Note on Fedora 18 & Intel 40Gb/s NICs typos --- trex_book.asciidoc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 04fcd718..54c23bf5 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -131,6 +131,7 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw * One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) * NUMAs-CPUs relation is determined with following command: lscpu * See real example of NUMA usage xref:numa-example[here] +* Using Intel XL710 with Fedora 18 requires updating Kernel. ===================================== .Sample order for UCSC-C220-M3S with 4x10Gb ports @@ -214,7 +215,7 @@ The following is an example of 4x10Gb/sec TRex with I350 management port and fou [source,bash] ---- -$[root@trex]lspci | grep Network +$[root@trex]lspci | grep Ethernet 01:00.0 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01) #<1> 01:00.1 Ethernet controller: Intel Corporation I350 Gigabit Network Connection (rev 01) #<2> 03:00.0 Ethernet controller: Intel Corporation 82599EB 10-Gigabit SFI/SFP+ Network Connection (rev 01) #<3> @@ -1126,7 +1127,7 @@ Then, we moved the cards to different NUMAs: image:images/different_numa.png[title="2_NICSs_different_NUMAs"] *+* -We needed to add configuration to the /etc/trex_cfg.yaml: +We added configuration to the /etc/trex_cfg.yaml: [source,python] platform : @@ -1138,7 +1139,7 @@ We needed to add configuration to the /etc/trex_cfg.yaml: - socket : 1 threads : [9, 10, 11, 12, 13, 14, 15] -This gave best results, and CPU utilization with c=7 at *\~98 Gb/s* TX BW became *~21%*! 
(40% with c=4) +This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became *~21%*! (40% with c=4) === Command line options anchor:cml-line[] -- cgit 1.2.3-korg From 343c254861208ab3af3068c98e0d3f3df08d36d6 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 23 Nov 2015 05:32:25 -0500 Subject: Intel 40Gb/s Fimaware Threads/Sockets clarification. --- trex_book.asciidoc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 54c23bf5..321c72b1 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -132,6 +132,15 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw * NUMAs-CPUs relation is determined with following command: lscpu * See real example of NUMA usage xref:numa-example[here] * Using Intel XL710 with Fedora 18 requires updating Kernel. +* For Intel XL710 NICs need to verify the FW is v4.42 or v4.53: +** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 --no-flow-control *-v 6* --nc | grep FW +** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc +** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc +** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc +** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc + + + ===================================== .Sample order for UCSC-C220-M3S with 4x10Gb ports @@ -1111,8 +1120,8 @@ a configuration file now has the folowing struct to support multi instance <5> the platform section <6> The thread_id for control <7> The thread_id for latency if used -<8> Socket of the dual interfaces, in this example of 03:00.0 and 03:00.1, memory should be local to the interface -<9> Thread to be used, should be local to the NIC +<8> Socket of the dual interfaces, in this example of 03:00.0 and 03:00.1, memory should be local to the interface. (Currently dual interface can't use 2 NUMAs.) +<9> Thread to be used, should be local to the NIC. 
The threads are pinned to cores, thus specifying threads is like specifying cores. *Real example:* anchor:numa-example[] -- cgit 1.2.3-korg From b8a5aed1dbb15d7add8e6dba4af0bf70f22b8c82 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 1 Dec 2015 07:43:23 +0200 Subject: v1.80 minor changes --- images/Thumbs.db | Bin 106496 -> 162816 bytes release_notes.asciidoc | 17 ++++++++++++----- trex_book.asciidoc | 14 +------------- trex_console.asciidoc | 9 ++------- 4 files changed, 15 insertions(+), 25 deletions(-) diff --git a/images/Thumbs.db b/images/Thumbs.db index 98a8ffc0..fc09ef9a 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/release_notes.asciidoc b/release_notes.asciidoc index dd236105..822c842f 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,13 +20,20 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 1.79 == +== Release 1.80 == + +* more stateless support +** All type of streams are supported (Continues/Burst/Multi-burst) +** Stream can call to other streams +** start/stop/pause/resume work from the Console +** -m[rate] is supported for example -m10gbps or -m10kpps from console +** update XL710 installation support -* initial support for stateless -** only continues streams -** No VM support -** more info how to enable the interactive shell link:trex_console.html +== Release 1.79 == +* Initial support for stateless +** Only continues streams are supported +** more info how to enable the interactive shell link:trex_console.html[here] == Release 1.78 == diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 321c72b1..c63c570f 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -7,21 +7,9 @@ TRex :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex +:toclevels: 4 -== Change log - -[options="header",cols="^1,^h,3a"] -|================= -| Version | name | meaning -| 1.77-0.0 | Hanoh Haim (hhaim) | -- initail versions -| 1.77.1 | 
Dan Klein (danklei) -| -- fixed ISO images references and supported linux supported OS - -|================= - == Introduction diff --git a/trex_console.asciidoc b/trex_console.asciidoc index 074130e6..10a90630 100644 --- a/trex_console.asciidoc +++ b/trex_console.asciidoc @@ -60,8 +60,6 @@ on the same machine from a different window connect to to trex [source,bash] ---- -$sudo ./t-rex-64 -i ----- $./trex-console ---- @@ -70,11 +68,10 @@ from console you can run this [source,bash] ---- -$sudo ./t-rex-64 -i ----- -# will start traffic on all port +# start traffic on all port >start -a -m 1 -f stl/imix_1pkt.yaml + # stop traffic on all port >stop -a @@ -83,10 +80,8 @@ $sudo ./t-rex-64 -i #stop all and remove all stats >reset - ---- - === Common Arguments This section includes arguments that are common to many commands -- cgit 1.2.3-korg From 0ab1c3dacce1c18cee91880bc4d28b029836de06 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Tue, 1 Dec 2015 09:41:53 +0200 Subject: Documentation + release notes for latency packet mode. --- release_notes.asciidoc | 6 ++++++ trex_book_basic.asciidoc | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 822c842f..91b89bff 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,12 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.81 == + +** Support for specifying different modes for the packets used for latency measurement. Details link:trex_manual.html#_measure_jitter_latency[here]. + +* link:http://csi-wiki-01:8090/issue/trex-149[trex-149] + == Release 1.80 == * more stateless support diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index ec0c9cf5..5efce2e2 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -3283,7 +3283,9 @@ This feature gives user more flexibility to define the IP generator. 
=== Measure Jitter/Latency -To measure jitter/latency on high priorty packets (one SCTP flow), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. This option measures latency and jitter in the latency. The shell output is similar to the following: +To measure jitter/latency on high priorty packets (one SCTP or ICMP flow), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. This option measures latency and jitter. We can define the type of traffic used for the latency measurement using the --l-pkt-mode option. The default it to use SCTP packets. We can change this to use ICMP echo request packets by specifying --l-pkt-mode 1. Can send ICMP requests from one side, and matching ICMP responses from other side, by specifying --l-pkt-mode 2. This is particulary usefull if your DUT drops traffic from outside, and you need to open pin hole to get the outside traffic in (for example when testing a firewall). There is another mode (--l-pkt-mode 3) which send ICMP request packets with a constant 0 sequence number. + +The shell output is similar to the following: [source,python] ---- -- cgit 1.2.3-korg From f13e544ee9041d446c0febc263fba76b8b484043 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Dec 2015 14:16:20 +0200 Subject: v1.81 --- release_notes.asciidoc | 6 +++- trex_book.asciidoc | 92 ++++++++++++++++++++++++++++++++++++++++++++---- trex_book_basic.asciidoc | 21 ++++++++++- 3 files changed, 111 insertions(+), 8 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 91b89bff..0dee7bfe 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -22,7 +22,11 @@ endif::backend-docbook[] == Release 1.81 == -** Support for specifying different modes for the packets used for latency measurement. Details link:trex_manual.html#_measure_jitter_latency[here]. 
+* more stateless support and fixes +** change the JSON-RPC result format +* Support for specifying different modes for the packets used for latency measurement. Details link:trex_manual.html#_measure_jitter_latency[here]. + +=== fix issues: === * link:http://csi-wiki-01:8090/issue/trex-149[trex-149] diff --git a/trex_book.asciidoc b/trex_book.asciidoc index c63c570f..d151e550 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -107,7 +107,7 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw | Bandwidth | Chipset | Example | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC | 10Gb/sec | Intel 82599 | Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter -| 40Gb/sec | Intel XL710 | QSFP+ +| 40Gb/sec | Intel XL710 Intel X710 | QSFP+, SFP+ | VMXNET | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox |================= @@ -120,12 +120,12 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw * NUMAs-CPUs relation is determined with following command: lscpu * See real example of NUMA usage xref:numa-example[here] * Using Intel XL710 with Fedora 18 requires updating Kernel. 
-* For Intel XL710 NICs need to verify the FW is v4.42 or v4.53: +* For Intel XL710 NICs there is a need to verify the FW is v4.42 or v4.53 see xref:xl710-firmware[here] for more info ** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 --no-flow-control *-v 6* --nc | grep FW -** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc -** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc -** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc -** PMD: *FW 4.22* API 1.2 NVM 04.02.04 eetrack 800013fc +** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc +** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc +** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc +** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc @@ -1301,3 +1301,83 @@ $./bp-sim-64-debug -f avl/sfr_delay_10_1g.yaml -v 1 <1> the memory usage of the templates <2> CSV for all the templates + +=== firmware update to XL710/X710 anchor:xl710-firmware[] + +To upgrade the firmware follow this + +==== Download the driver + +*Download driver i40e from link:https://downloadcenter.intel.com/download/24411/Network-Adapter-Driver-for-PCI-E-40-Gigabit-Network-Connections-under-Linux-[here] +*Build the kernel module + +[source,bash] +---- +$tar -xvzf i40e-1.3.47 +$cd i40e-1.3.47/src +$make +$sudo insmod i40e.ko +---- + + +==== Bind the NIC to Linux + +In this stage we bind the NIC to Linux (take it from DPDK) + +[source,bash] +---- +$sudo ./dpdk_nic_bind.py --status # show the ports + +Network devices using DPDK-compatible driver +============================================ +0000:02:00.0 'Device 1583' drv=igb_uio unused= #<1> +0000:02:00.1 'Device 1583' drv=igb_uio unused= #<2> +0000:87:00.0 'Device 1583' drv=igb_uio unused= +0000:87:00.1 'Device 1583' drv=igb_uio unused= + +$sudo dpdk_nic_bind.py -u 02:00.0 02:00.1 #<3> + +$sudo dpdk_nic_bind.py -b i40e 02:00.0 02:00.1 #<4> + +$ethtool -i p1p2 #<5> + +driver: i40e +version: 1.3.47 +firmware-version: 4.24 0x800013fc 0.0.0 #<6> +bus-info: 0000:02:00.1 
+supports-statistics: yes +supports-test: yes +supports-eeprom-access: yes +supports-register-dump: yes +supports-priv-flags: yes + + +$ethtool -S p1p2 +$lspci -s 02:00.0 -vvv #<7> + + +---- +<1> XL710 ports that need to unbind from DPDK +<2> XL710 ports that need to unbind from DPDK +<3> Unbind from DPDK using this command +<4> Bind to linux to i40e driver +<5> Show firmware version throw linux driver +<6> Firmare version +<7> More info + + +==== Upgrade + +Download NVMUpdatePackage.zip from Intel site link:http://downloadcenter.intel.com/download/24769/NVM-Update-Utility-for-Intel-Ethernet-Converged-Network-Adapter-XL710-X710-Series[here] +It includes the utility `nvmupdate64e` + +Run this: + +[source,bash] +---- +$sudo ./nvmupdate64e +---- + +You might need a power cycle and to run this command a few times to get the latest firmware + + diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index 5efce2e2..c3f489a8 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -3283,7 +3283,26 @@ This feature gives user more flexibility to define the IP generator. === Measure Jitter/Latency -To measure jitter/latency on high priorty packets (one SCTP or ICMP flow), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. This option measures latency and jitter. We can define the type of traffic used for the latency measurement using the --l-pkt-mode option. The default it to use SCTP packets. We can change this to use ICMP echo request packets by specifying --l-pkt-mode 1. Can send ICMP requests from one side, and matching ICMP responses from other side, by specifying --l-pkt-mode 2. This is particulary usefull if your DUT drops traffic from outside, and you need to open pin hole to get the outside traffic in (for example when testing a firewall). There is another mode (--l-pkt-mode 3) which send ICMP request packets with a constant 0 sequence number. 
+To measure jitter/latency on high priorty packets (one SCTP or ICMP flow), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. +This option measures latency and jitter. We can define the type of traffic used for the latency measurement using the `--l-pkt-mode` option. + + +[options="header",cols="^1,10a"] +|================= +| Option ID| Type +| 0 | +*default*, SCTP packets +| 1 | +ICMP echo request packets from both sides +| 2 | +*Stateful*, send ICMP requests from one side, and matching ICMP responses from other side. + +This is particulary usefull if your DUT drops traffic from outside, and you need to open pin hole to get the outside traffic in (for example when testing a firewall) + +| 3 | +send ICMP request packets with a constant 0 sequence number. +|================= + The shell output is similar to the following: -- cgit 1.2.3-korg From 4d2d0826365ffe07e42c2cd0f743c97d4b89fb36 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 6 Dec 2015 17:18:00 +0200 Subject: fix the configuration file --- trex_book.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index d151e550..1ba42fd5 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -306,7 +306,7 @@ You could copy a basic configuration file from cfg folder by running this comman [source,bash] ---- -$cp cfg/vm1.yaml /etc/trex_cfg.yaml +$cp cfg/simple_cfg.yaml /etc/trex_cfg.yaml ---- Now edit the configuration file with the right values from the previous section -- cgit 1.2.3-korg From e1fd0609261281d24c1070afb1c3744fe6eb2524 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 7 Dec 2015 16:10:50 +0200 Subject: change notes: youtrack trex-110 issue fix update manual: option -c more info, Fedora 18 kernel update info for 40Gb Intel --- release_notes.asciidoc | 3 ++- trex_book.asciidoc | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git 
a/release_notes.asciidoc b/release_notes.asciidoc index 0dee7bfe..af90e185 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -28,7 +28,8 @@ endif::backend-docbook[] === fix issues: === -* link:http://csi-wiki-01:8090/issue/trex-149[trex-149] +* link:http://csi-wiki-01:8090/issue/trex-149[trex-149] +* link:http://csi-wiki-01:8090/issue/trex-110[trex-110] == Release 1.80 == diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 1ba42fd5..a478184a 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -115,18 +115,20 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw [IMPORTANT] ===================================== * Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. -* Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. -* One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) -* NUMAs-CPUs relation is determined with following command: lscpu -* See real example of NUMA usage xref:numa-example[here] -* Using Intel XL710 with Fedora 18 requires updating Kernel. -* For Intel XL710 NICs there is a need to verify the FW is v4.42 or v4.53 see xref:xl710-firmware[here] for more info -** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 --no-flow-control *-v 6* --nc | grep FW -** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc -** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc -** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc -** PMD: FW 4.22 API 1.2 NVM *04.04.02* eetrack 800013fc - +* Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. 
+ + One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) + + NUMAs-CPUs relation is determined with following command: lscpu + + See real example of NUMA usage xref:numa-example[here] +* Using Intel XL710 with Fedora 18 requires updating Kernel: +** > sudo yum update kernel +** > sudo yum update kernel-devel +** > sudo yum update kernel-headers +* For Intel XL710 NICs there is a need to verify the NVM is v4.42 or v4.53 see xref:xl710-firmware[here] for more info +** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 --no-flow-control *-v 6* --nc | grep NVM + + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc ===================================== @@ -1144,7 +1146,9 @@ This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became * Traffic YAML configuration file. *-c=CORES*:: - Number of cores. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. + Number of cores _per dual interface_. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. + + TRex uses 2 cores for inner needs, the rest of cores can be used divided by number of dual interfaces. + + For virtual NICs the limit is -c=1. *-l=HZ*:: Run the latency daemon in this Hz rate. Example: -l 1000 runs 1000 pkt/sec from each interface. A value of zero (0) disables the latency check. 
-- cgit 1.2.3-korg From dc7e7c0ba8b92a241185282d9c0aeee98dd304be Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 8 Dec 2015 20:24:03 +0200 Subject: remove ZMQ default value 4507 --- trex_book.asciidoc | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 1ba42fd5..f5d825d3 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -973,16 +973,16 @@ Configuration file examples can be found in the `$ROOT/cfg` folder [source,python] ---- - - port_limit : 2 <1> - version : 2 <2> - interfaces : ["03:00.0","03:00.1"] <3> - enable_zmq_pub : true <4> - zmq_pub_port : 4500 <5> - prefix : setup1 <6> - limit_memory : 1024 <7> + - port_limit : 2 #mandatory <1> + version : 2 #mandatory <2> + interfaces : ["03:00.0","03:00.1"] #mandatory <3> + #enable_zmq_pub : true <4> + #zmq_pub_port : 4500 <5> + #prefix : setup1 <6> + #limit_memory : 1024 <7> c : 4 <8> port_bandwidth_gb : 10 <9> - port_info : # set eh mac addr + port_info : # set eh mac addr mandatory - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 <10> src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1 @@ -993,11 +993,11 @@ Configuration file examples can be found in the `$ROOT/cfg` folder src_mac : [0x0,0x0,0x0,0x8,0x0,0x02] - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4 ---- -<1> The number of ports, should be equal to the number of interfaces in 3) -<2> Must be set to 2 -<3> Interface that should be used. used `sudo ./dpdk_setup_ports.py --show` -<4> Enable the ZMQ publisher for stats data. -<5> ZMQ port number. +<1> The number of ports, should be equal to the number of interfaces in 3) - mandatory +<2> Must be set to 2 - mandatory +<3> Interface that should be used. used `sudo ./dpdk_setup_ports.py --show` - mandatory +<4> Enable the ZMQ publisher for stats data, default is true. +<5> ZMQ port number. the default value is good. 
you can remove this line <6> The name of the setup should be distinct ( DPDK --file-prefix ) <7> DPDK -m limit the packet memory @@ -1088,8 +1088,6 @@ a configuration file now has the folowing struct to support multi instance - version : 2 interfaces : ["03:00.0","03:00.1"] port_limit : 2 - enable_zmq_pub : true # enable publisher for stats data - zmq_pub_port : 4507 prefix : setup1 <1> limit_memory : 1024 <2> c : 4 <3> -- cgit 1.2.3-korg From 8b0520a42a93dfd7fa656ac48c6831ee89935e11 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 8 Dec 2015 21:15:06 +0200 Subject: move defect to the right version --- release_notes.asciidoc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index af90e185..f8e7830a 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,16 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.82 == + +* more stateless support +** stats function works now + +=== fix issues: === + +* link:http://csi-wiki-01:8090/issue/trex-110[trex-110] + + == Release 1.81 == * more stateless support and fixes @@ -29,7 +39,6 @@ endif::backend-docbook[] === fix issues: === * link:http://csi-wiki-01:8090/issue/trex-149[trex-149] -* link:http://csi-wiki-01:8090/issue/trex-110[trex-110] == Release 1.80 == -- cgit 1.2.3-korg From 65804e769249a400361f4f83deb11cd8f1897c7f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 10 Dec 2015 09:54:05 +0200 Subject: v1.82 --- release_notes.asciidoc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index f8e7830a..b021526e 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,7 +23,10 @@ endif::backend-docbook[] == Release 1.82 == * more stateless support -** stats function works now +** console stats function works now +** consule tui works better +** R/W support. 
only one client has R/W capability +* XL710/X710 support ICMP filter === fix issues: === -- cgit 1.2.3-korg From d9f0c07627914c9a22acf3912a177b23efd4ab50 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 13 Dec 2015 15:27:49 +0200 Subject: minor update to release notes --- release_notes.asciidoc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index b021526e..519a0a59 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,8 +23,7 @@ endif::backend-docbook[] == Release 1.82 == * more stateless support -** console stats function works now -** consule tui works better +** console stats/tui function works now ** R/W support. only one client has R/W capability * XL710/X710 support ICMP filter -- cgit 1.2.3-korg From 63dde9c52be17ac5a7ff1a74a25e8f364c92e779 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 13 Dec 2015 19:38:51 +0200 Subject: trex manual: -k broken at VM --- trex_book.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index f5fa2f55..3354e74b 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1198,6 +1198,7 @@ This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became * *-k=KSEC*:: Run a latency test before starting the test. TRex will wait for x sec before and after sending latency packets at startup. + Current limitation (holds for TRex version 1.82): does not work properly on VM. *--cfg=platform_yaml*:: Load and configure platform using this file. 
See example file: cfg/cfg_examplexx.yaml -- cgit 1.2.3-korg From eb9325c4ec34f6cbc8901e366fc6bcb4f8fa96b8 Mon Sep 17 00:00:00 2001 From: imarom Date: Mon, 14 Dec 2015 06:13:45 +0200 Subject: few modifications to RPC doc --- trex_rpc_server_spec.asciidoc | 107 +++++++++++++++++++++++------------------- 1 file changed, 58 insertions(+), 49 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 32007b3f..93b9e482 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -163,7 +163,7 @@ TRex > ping { "id": "l0tog11a", "jsonrpc": "2.0", - "result": "ACK" + "result": {} } [SUCCESS] @@ -199,7 +199,7 @@ Server Started { "id" : "maa5a3g1", "jsonrpc" : "2.0", - "result" : "ACK" + "result" : {} } ---- @@ -238,7 +238,7 @@ The following RPC commands are supported * *Valid States* - 'not relevant' * *Description* - Pings the TRex server * *Paramters* - None -* *Result* ['string'] - "ACK" On Sucess +* *Result* ['object'] - {} Example: @@ -258,7 +258,7 @@ Example: { "jsonrpc" : "2.0", "id" : 1, - "result" : "ACK" + "result" : {} } ---- @@ -372,8 +372,8 @@ Example: |================= | Field | Type | Description | driver | string | driver type -| speed | string | speed of the port (1g, 10g, 40g, 100g) -| status | string | 'down', 'idle' or 'transmitting' +| index | int | port index +| speed | int | speed of the port (1, 10, 40, 100) |================= @@ -401,28 +401,24 @@ Example: "port_count": 4, "ports": [ { - "driver": "E1000", + "driver": "rte_ixgbe_pmd", "index": 0, - "speed": "1g", - "status": "down" + "speed": 10, }, { - "driver": "E1000", + "driver": "rte_ixgbe_pmd", "index": 1, - "speed": "1g", - "status": "down" + "speed": 10, }, { - "driver": "E1000", + "driver": "rte_ixgbe_pmd", "index": 2, - "speed": "1g", - "status": "down" + "speed": 10, }, { - "driver": "E1000", + "driver": "rte_ixgbe_pmd", "index": 3, - "speed": "1g", - "status": "down" + "speed": 10, } ] } @@ -430,13 +426,13 @@ Example: ---- -=== Get 
Owner -* *Name* - 'get_owner' +=== Get Port Status +* *Name* - 'get_port_status' * *Valid States* - 'all' -* *Description* - Queries the server for a specific port current owner +* *Description* - Queries the server for status * *Paramters* - ** *port_id* ['int'] - port id to query for owner -* *Result* ['string'] - owner name if exists, otherwise 'none' +* *Result* ['object'] - see below [source,bash] ---- @@ -444,26 +440,37 @@ Example: 'Request': { - "id": "hxjkuwj9", + "id": "pbxny90u", "jsonrpc": "2.0", - "method": "get_owner", + "method": "get_port_status", "params": { - "port_id": 1 + "port_id": 2 } } 'Response': { - "id": "hxjkuwj9", + "id": "pbxny90u", "jsonrpc": "2.0", "result": { - "owner": "itay" + "owner": "", + "state": "STREAMS" } } ---- +.return value: 'get_port_status' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| owner | string | name of current owner (or "" if none) +| state | string | state of port (DOWN, IDLE, STREAMS, TX, PAUSE) +|================= + + + === Acquire * *Name* - 'Acquire' * *Valid States* - 'all' @@ -472,7 +479,7 @@ Example: ** *port_id* ['int'] - port id to take ownership ** *user* ['string'] - User name aquiring the system ** *force* ['boolean'] - force action even if another user is holding the port -* *Result* ['string'] - 'unique' connection handler for future requests for that port +* *Result* ['string'] - handler for future sessions [source,bash] ---- @@ -496,8 +503,9 @@ Example: { "id": "b1tr56yz", "jsonrpc": "2.0", - "result": "4cBWDxS2" + "result": "AQokC3ZA" } + ---- @@ -508,7 +516,7 @@ Example: * *Paramters* - ** *handler* ['string'] - unique connection handler ** *port_id* ['int'] - port id to release -* *Result* ['string'] - "ACK" on success +* *Result* ['object'] - {} [source,bash] ---- @@ -531,7 +539,7 @@ Example: { "id": "m785dxwd", "jsonrpc": "2.0", - "result": "ACK" + "result": {} } ---- @@ -545,7 +553,7 @@ Example: ** *port_id* ['int'] - port id associated with this 
stream ** *stream_id* ['int'] - stream id associated with the stream object ** *stream* - object of type xref:stream_obj['stream'] -* *Result* ['string'] - "ACK" in case of success +* *Result* ['object'] - {} ==== Object type 'stream' anchor:stream_obj[] @@ -730,7 +738,7 @@ This could be stream_id different from the stream object which contains the rx_s { "id": 1, "jsonrpc": "2.0", - "result": "ACK" + "result": {} } @@ -749,7 +757,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj ** *port_id* ['int'] - port assosicated with the stream. ** *stream_id* ['int'] - stream to remove -* *Result* ['string'] - "ACK" in case of success +* *Result* ['object'] - {} [source,bash] ---- @@ -773,14 +781,14 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj { "id": 1 "jsonrpc": "2.0", - "result": "ACK" + "result": {} } ---- === Get Stream ID List * *Name* - 'get_stream_list' -* *Valid States* - 'owned', 'active' +* *Valid States* - 'unowned', 'owned', 'active' * *Description* - fetch all the assoicated streams for a port * *Paramters* ** *handler* ['string'] - unique connection handler @@ -819,7 +827,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Stream * *Name* - 'get_stream' -* *Valid States* - 'owned', 'active' +* *Valid States* - 'unowned', 'owned', 'active' * *Description* - get a specific stream object * *Paramters* ** *handler* ['string'] - unique connection handler @@ -883,7 +891,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj ** *handler* ['string'] - unique connection handler ** *port_id* ['int'] - port for the associated stream -* *Result* ['string'] - "ACK" on success +* *Result* ['object'] - {} [source,bash] @@ -906,7 +914,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj { "id": 1, "jsonrpc": "2.0", - "result": "ACK" + "result": {} } @@ -921,7 +929,7 @@ In case rx_stats feature is 
enabled, rx_object **must include** all rx_stats obj ** *handler* ['string'] - unique connection handler ** *port_id* ['int'] - port id on which to start traffic -* *Result* ['string'] - "ACK" on success +* *Result* ['object'] - {} [source,bash] ---- @@ -942,7 +950,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj { "id": "b3llt8hs", "jsonrpc": "2.0", - "result": "ACK" + "result": {} } @@ -956,7 +964,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj ** *handler* ['string'] - unique connection handler ** *port_id* ['int'] - port id on which to stop traffic -* *Result* ['string'] - "ACK" on success +* *Result* ['object'] - {} [source,bash] ---- @@ -978,7 +986,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj { "id": "h2fyhni7", "jsonrpc": "2.0", - "result": "ACK" + "result": {} } @@ -986,7 +994,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Global Stats * *Name* - 'get_global_stats' -* *Valid States* - 'owned', 'active' +* *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get machine global stats * *Paramters* - None @@ -1011,13 +1019,14 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Port Stats * *Name* - 'get_port_stats' -* *Valid States* - 'owned', 'active' +* *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get port stats * *Paramters* ** *port_id* [int] - The port id for query * *Result* ['object'] - See Below + .Return value of 'get_port_stats' [options="header",cols="1,1,3"] |================= @@ -1036,7 +1045,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Stream Stats * *Name* - 'get_steram_stats' -* *Valid States* - 'owned', 'active' +* *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get port stats * *Paramters* ** *port_id* [int] - The port id for query @@ -1373,7 +1382,7 
@@ On the following example, there's no VM instructions, rx_stats option is disable { "id" : "2bqgd2r4", "jsonrpc" : "2.0", - "result" : "ACK" + "result" : {} } ---- @@ -1505,12 +1514,12 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s { "id" : "tq49f6uj", "jsonrpc" : "2.0", - "result" : "ACK" + "result" : {} }, { "id" : "2m7i5olx", "jsonrpc" : "2.0", - "result" : "ACK" + "result" : {} } ] -- cgit 1.2.3-korg From 82075b21190b54b1cf9a11c9a5095b9f8c0d4c5f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 16 Dec 2015 21:38:29 +0200 Subject: add tuple_generator command --- trex_rpc_server_spec.asciidoc | 123 +++++++++++++++++++++++++++++++++++++++++- visio_drawings/streams.vsd | Bin 0 -> 63488 bytes 2 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 visio_drawings/streams.vsd diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 93b9e482..5ffb8a1c 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -7,6 +7,8 @@ The TRex RPC Server :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex +:toclevels: 4 + == Change log @@ -23,6 +25,9 @@ The TRex RPC Server | - Fixed some consistency issues - added RPC interaction examples appendix +| 1.2 | Hanoch Haim (hhaim) +| +- add tuple generator command |================= @@ -654,6 +659,52 @@ Any element in the array can be one of the following object types: | is_big_endian | boolean | should write as big endian or little |================= + +.Object type 'vm - tuple_flow_var' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''tuple_flow_var''' +| name | string | tuple generator name - this should be a unique identifier name.ip and name.port will be added +| ip_min | uint32_t as string | ipv4 min ip as uint32_t e.g. 10.0.0.1 +| ip_max | uint32_t as string | ipv4 max ip as uint32_t e.g. 
10.0.1.255 +| port_min | uint16_t as string | ipv4 min port as uint16_t e.g. 1025 +| port_max | uint16_t as string | ipv4 max port as uint16_t e.g. 65000 +| limit_flows | uint32_t as string | the number of flows. 0 means we will use all the ip/port min-max range +| flags | uint16_t as string | 1 - unlimited number of flows. in case the first bit is enabled port_min and port_max is ignored and the maximum number of flows will be generated on those ips +|================= + +an example of tuple_flow_var variable + +[source,bash] +---- + ip_min = 10.0.0.1 + ip_max = 10.0.0.5 + port_min = 1025 + port_max = 1028 + limit_flows = 10 +---- + +.Results +[options="header",cols="1,1,3"] +|================= +| IP | PORT | FLOW +| 10.0.0.1 | 1025 | 1 +| 10.0.0.2 | 1025 | 2 +| 10.0.0.3 | 1025 | 3 +| 10.0.0.4 | 1025 | 4 +| 10.0.0.5 | 1025 | 5 +| 10.0.0.1 | 1026 | 6 << the port is inc here +| 10.0.0.2 | 1026 | 7 +| 10.0.0.3 | 1026 | 8 +| 10.0.0.4 | 1026 | 9 +| 10.0.0.5 | 1026 | 10 +| 10.0.0.1 | 1025 | 1 << back to the first flow +|================= + +The variable name.port and name.ip could be written to any offset in the packet (usualy to src_ip and src_port as client) + + TIP: For more information and examples on VM objects please refer to: link:vm_doc.html[VM examples] @@ -1523,4 +1574,74 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s } ] ----- \ No newline at end of file +---- + + +Another Example of tuple generator +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + +[source, bash] +---- + - name: udp_64B + stream: + self_start: True + packet: + binary: stl/udp_64B_no_crc.pcap # pcap should not include CRC + mode: + type: continuous + pps: 100 + rx_stats: [] + + # program that define 1M flows with IP range 16.0.0.1-16.0.0.254 + # we will create a script that do that for you + # this is the low level instructions + vm: [ + { + "type" : "tuple_flow_var", # name of the command + + "name" : "tuple_gen", # tuple_gen.ip tuple_gen.port can be used + + 
"ip_min" : 0x10000001, # min ip 16.0.0.1 + "ip_max" : 0x100000fe, # max ip 16.0.0.254 + + "port_min" : 1025, # min port 1025 + "port_max" : 65500, # max port 65500 + + "limit_flows" : 1000000, # number of flows + "flags" : 0, # 1 - for unlimited + }, + + { + "type" : "write_flow_var", # command name + + "name" : "tuple_gen.ip", # varible to write + + "add_value" : 0, # no need to add value + + "is_big_endian" : true, # write as big edian + + "pkt_offset" : 26, # write tuple_gen.ip into ipv4.src_ip + }, + + { + "type" : "fix_checksum_ipv4", # fix ipv4 header checksum + + "pkt_offset" : 14, # offset of ipv4 header + + }, + + { + "type" : "write_flow_var", # command name + + "name" : "tuple_gen.port", # varible to write + + "add_value" : 0, # no need to add value + + "is_big_endian" : true, # write as big edian + + "pkt_offset" : 34, # write tuple_gen.port into udp.src_port + } + + ] +---- diff --git a/visio_drawings/streams.vsd b/visio_drawings/streams.vsd new file mode 100644 index 00000000..dd925c1d Binary files /dev/null and b/visio_drawings/streams.vsd differ -- cgit 1.2.3-korg From 04776cd2a3ce5aea15ddf9cb8f4800986f72e6db Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Dec 2015 09:24:39 +0200 Subject: v1.83 release notes --- release_notes.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 519a0a59..77fbd5d3 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,12 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.83 == + +* more stateless support +** Add basic Packet Field engine see stl/imin_1pkt_vm.yaml +** some improvment with TUI window can be run in parallel with --tui option + == Release 1.82 == * more stateless support -- cgit 1.2.3-korg From 122f2e0f4b3250308a07e13edc06787b56a47a1a Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 21 Dec 2015 19:38:14 +0200 Subject: extra marks cleanup --- trex_book.asciidoc | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 3354e74b..41e843a4 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -825,7 +825,7 @@ $sudo ./t-rex-64 -f avl/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 --r [source,python] ---- Cpu Utilization : 0.1 % <1> - if| tx_ok , rx_ok , rx ,error, average , max , Jitter<1> , max window + if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window | , , check, , latency(usec),latency (usec) ,(usec) , -------------------------------------------------------------------------------- 0 | 1002, 1002, 2501, 0, 61 , 70, 3 | 60 @@ -835,7 +835,7 @@ Cpu Utilization : 0.1 % Rx Check stats enabled <2> ------------------------------------------------------------------------------------------- - rx check: avg/max/jitter latency, 94 , 744, 49<1> | 252 287 309 <3> + rx check: avg/max/jitter latency, 94 , 744, 49 | 252 287 309 <3> active flows: 10, fif: 308, drop: 0, errors: 0 <4> ------------------------------------------------------------------------------------------- -- cgit 1.2.3-korg From c3092eeba15c82d4fc856ac4b66f9e2ceaaed97d Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Wed, 23 Dec 2015 12:10:10 +0200 Subject: VMXNET3 - Ubuntu not Fedora --- trex_book.asciidoc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 41e843a4..b1a1d0af 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -108,12 +108,14 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC | 10Gb/sec | Intel 82599 | Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter | 40Gb/sec | Intel XL710 Intel X710 | QSFP+, SFP+ -| VMXNET | VMware paravirtualize | connect using vmWare vSwitch +| VMXNET / + +VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | 
vmWare/KVM/VirtualBox |================= [IMPORTANT] ===================================== +* For VMXNET3 use Ubuntu and *not* Fedora. * Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. * Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. + One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) + -- cgit 1.2.3-korg From 5fa914c812b70d756d365456ac4d90fd10185bec Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 24 Dec 2015 10:03:39 +0200 Subject: add Ubuntu OVA link --- trex_book.asciidoc | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index b1a1d0af..24a5e0cd 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -175,21 +175,28 @@ x86_64 #<1> <1> x86_64 is the desired output +==== Download OVA file + +The OVA is a ready to use clone of VM to be deployed at Hypervisor. 
+ +* link:http://csi-wiki-01:8181/trex/TRex_Ubuntu_VM.ova[Ubuntu 14.04.1] + + ==== Download ISO file The ISO images of the described Linux OS can be downloaded from the following links: .Supported Linux ISO image links -[options="header",cols="1,2,3^",width="50%"] +[options="header",cols="1^,2^",width="50%"] |====================================== -| # | Distribution | SHA256 Checksum -| 1.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-DVD.iso[Fedora 18] +| Distribution | SHA256 Checksum +| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-DVD.iso[Fedora 18] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-CHECKSUM[Fedora 18 CHECKSUM] -| 2.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-DVD.iso[Fedora 19] +| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-DVD.iso[Fedora 19] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] -| 3.| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] +| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] -| 4.|link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.01] +| link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.1] | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] |====================================== @@ 
-203,6 +210,7 @@ $sha256sum Fedora-18-x86_64-DVD.iso ---- <1> Should be equal to the sha256 values described in the linked CHECKSUM files. + ==== Install Linux Ask your lab admin to install the Linux using CIMC, assign an IP, and set the DNS. Request the sudo or super user password to enable you to ping and SSH. -- cgit 1.2.3-korg From 70239c1970137cb62bb81b1848273501eae83a6e Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 24 Dec 2015 15:11:24 +0200 Subject: add info about OVA username/password --- trex_book.asciidoc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 24a5e0cd..5a779671 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -179,7 +179,9 @@ x86_64 #<1> The OVA is a ready to use clone of VM to be deployed at Hypervisor. -* link:http://csi-wiki-01:8181/trex/TRex_Ubuntu_VM.ova[Ubuntu 14.04.1] +* link:http://csi-wiki-01:8181/trex/TRex_Ubuntu_VM.ova[Ubuntu 14.04.1] + +Username/password: trex/trex. Root password is trex as well. + +It has TRex v1.83 in /home/trex directory + 2xVMXNET3 NICs for TRex + /etc/trex_cfg.yaml configured for them. Ready to run. ==== Download ISO file -- cgit 1.2.3-korg From 566130daf2a871c90d8981fbba4a469a059ccb50 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 24 Dec 2015 16:34:54 +0200 Subject: v1.83 fix --- release_notes.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 77fbd5d3..9e4bb21c 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -24,7 +24,7 @@ endif::backend-docbook[] * more stateless support ** Add basic Packet Field engine see stl/imin_1pkt_vm.yaml -** some improvment with TUI window can be run in parallel with --tui option +** some improvement with TUI window. 
Can be run in parallel with --tui option == Release 1.82 == -- cgit 1.2.3-korg From a6d3153a43f7bd090c3a11c5d68fb4ab2ca500a8 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 24 Dec 2015 16:42:37 +0200 Subject: remove the local wiki --- trex_book.asciidoc | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 5a779671..0f938bbd 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -115,7 +115,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch [IMPORTANT] ===================================== -* For VMXNET3 use Ubuntu and *not* Fedora. +* For VMXNET3 use Ubuntu and *not* Fedora 18. Fedora 18 will crash. * Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. * Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. + One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) + @@ -175,13 +175,6 @@ x86_64 #<1> <1> x86_64 is the desired output -==== Download OVA file - -The OVA is a ready to use clone of VM to be deployed at Hypervisor. - -* link:http://csi-wiki-01:8181/trex/TRex_Ubuntu_VM.ova[Ubuntu 14.04.1] + -Username/password: trex/trex. Root password is trex as well. + -It has TRex v1.83 in /home/trex directory + 2xVMXNET3 NICs for TRex + /etc/trex_cfg.yaml configured for them. Ready to run. 
==== Download ISO file -- cgit 1.2.3-korg From f47c74bea164302f6d4726987c7e4282631f67d3 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 24 Dec 2015 16:45:16 +0200 Subject: v1.84 --- release_notes.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 9e4bb21c..b9428e71 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,14 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.84 == + +* more stateless support +** Add splitter range support see "split_by_var" in style/imix_1pkt_vm. yaml +** Add more samples see stl/syn_attack_sample.yaml. Improve random performance +** more improvement with TUI window + + == Release 1.83 == * more stateless support -- cgit 1.2.3-korg From 7b5337d08ec2c5dda98e91d32c1471e7779291c1 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 24 Dec 2015 17:10:17 +0200 Subject: v1.84 --- trex_rpc_server_spec.asciidoc | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 5ffb8a1c..46ff734c 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -28,7 +28,9 @@ The TRex RPC Server | 1.2 | Hanoch Haim (hhaim) | - add tuple generator command - +| 1.3 | Hanoch Haim (hhaim) +| +- update VM instructions |================= @@ -576,7 +578,7 @@ The format of that object is as follows: | next_stream_id | int | next stream to start after this stream. 
-1 means stop after this stream | packet | object | object of type xref:packet_obj['packet'] | mode | object | object of type xref:mode_obj['mode'] -| vm | array | array of objects of type xref:vm_obj['vm'] +| vm | object | array of objects of type xref:vm_obj['vm'] | rx_stats | object | object of type xref:rx_stats_obj['rx_stats'] |================= @@ -623,8 +625,20 @@ mode object can be 'one' of the following objects: |================= ===== Object type 'vm' anchor:vm_obj[] -Array of VM instruction objects to be used with this stream +an Object that include instructions array and properties of the field engine program + +.Object type 'packet' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| Instructions | array | list of instructional objects +| split_by_var | string | name of the field by which to split into threads +| Restart | boolean | restart the field engine program when stream moving from inactive->active +|================= + + +Array of VM instruction objects to be used with this stream Any element in the array can be one of the following object types: .Object type 'vm - fix_checksum_ipv4' @@ -643,9 +657,9 @@ Any element in the array can be one of the following object types: | name | string | flow var name - this should be a unique identifier | size | [1,2,4,8] | size of the flow var in bytes | op | ['inc', 'dec', 'random'] | operation type to perform on the field -| init value | uint64_t as string | init value for the field -| min value | uint64_t as string | minimum value for the field -| max value | uint64_t as string | maximum value for the field +| init_value | uint64_t as string | init value for the field +| min_value | uint64_t as string | minimum value for the field +| max_value | uint64_t as string | maximum value for the field |================= .Object type 'vm - write_flow_var' -- cgit 1.2.3-korg From 9a3bc7586d45290d345ad3514c89c331fc481f2d Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 5 Jan 2016 
13:34:43 +0200 Subject: v1.85 --- release_notes.asciidoc | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index b9428e71..254d6bb1 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,25 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.85 == + +* Upgrade to DPDK 2.2.0 +** Some XL710/X710 NIC phy issues solved +** VMXNET3 driver is optimized +** Cisco VIC should be supported, not tested yet +* Jumbo packet size is supported for 1/10/40 Intel NIC up to 9K for both stateless and stateful +* youTrack is public now, can be seen here link:http://trex-tgn.cisco.com/youtrack[here] +* More stateless support +** Support random packet size trim instruction - see stl/udp_rand_size_9k.yaml for an example +** Move Python Regression to trex-core +** Add Coverity scripts +** Console/Python API can be call from Cisco CEL now (ZMQ Python library is compiled to an old glibc) +** Add simulator for stateless + +=== fix issues: === + +* The infamous DPDK error is not seen in case of a wrong core argument see here link:http://trex-tgn.cisco.com/youtrack/issue/trex-147[trex-147] + == Release 1.84 == * more stateless support @@ -43,7 +62,7 @@ endif::backend-docbook[] === fix issues: === -* link:http://csi-wiki-01:8090/issue/trex-110[trex-110] +* link:http://trex-tgn.cisco.com/youtrack/trex-110[trex-110] == Release 1.81 == @@ -54,7 +73,7 @@ endif::backend-docbook[] === fix issues: === -* link:http://csi-wiki-01:8090/issue/trex-149[trex-149] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-149[trex-149] == Release 1.80 == @@ -80,12 +99,12 @@ endif::backend-docbook[] Python API fixup see here -* link:http://csi-wiki-01:8090/issue/trex-126[trex-126] -* link:http://csi-wiki-01:8090/issue/trex-123[trex-122] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-126[trex-126] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-123[trex-122] Check for 
64bit Kernel -* link:http://csi-wiki-01:8090/issue/trex-123[trex-123] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-123[trex-123] == Release 1.77 == -- cgit 1.2.3-korg From adc9496a1f23f20f088fb0fc541882e97f0ae8ed Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Tue, 19 Jan 2016 09:04:46 +0200 Subject: Add documentation for --learn-mod --- trex_book.asciidoc | 25 +++++++++++++------------ trex_book_basic.asciidoc | 4 ++-- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 0f938bbd..63ab7294 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -126,7 +126,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch ** > sudo yum update kernel-devel ** > sudo yum update kernel-headers * For Intel XL710 NICs there is a need to verify the NVM is v4.42 or v4.53 see xref:xl710-firmware[here] for more info -** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 --no-flow-control *-v 6* --nc | grep NVM + +** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 *-v 6* --nc | grep NVM + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + @@ -716,9 +716,8 @@ TRex(0) -| |-TRex(1) === NAT support -TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn` to the command line. -//TBD: maybe... add the '--learn' option on the command line. -This is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. +TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. +In mode 2, this is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. In mode 2, this is done by embedding NAT info in the ACK of the first TCP SYN. 
*Example:*:: @@ -726,14 +725,14 @@ This is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 i [source,bash] ---- -$sudo ./t-rex-64 -f cap2/http_simple.yaml -c 4 -l 1000 -d 100000 -m 30 --learn +$sudo ./t-rex-64 -f cap2/http_simple.yaml -c 4 -l 1000 -d 100000 -m 30 --learn-mode 1 ---- *SFR traffic without bundeling/ALG support* [source,bash] ---- -$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 100000 -m 10 --learn +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 100000 -m 10 --learn-mode 2 ---- *New terminal counters:*:: @@ -1165,10 +1164,12 @@ This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became * *--ipv6*:: Convert template to IPv6 mode. -*--learn*:: - Learn the dynamic NAT translation and ALG. - -*--learn-verify*:: +*--learn-mode *:: + Learn the dynamic NAT translation. + + 1 - Use TCP ACK in first SYN to pass NAT translation information. Will work only for TCP streams. Initial SYN packet must be present in stream. + + 2 - Add special IP option to pass NAT translation information. Will not work on certain firewalls if they drop packets with IP options. + +*--learn-verify*:: Learn the translation. This feature is intended for verification of the mechanism in cases where there is no NAT. *-p*:: @@ -1230,8 +1231,8 @@ This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became * *--iom=MODE*:: I/O mode for interactive mode. Possible values: 0 (silent), 1 (normal), 2 (short) -*--no-flow-control*:: - Prevents TRex from changing flow control. In default TRex operation, flow control is disabled at startup. +*--no-flow-control-change*:: + Prevents TRex from changing flow control. By default (without this option), TRex disables flow control at startup for all cards, except for the Intel XL710 40G card. *--mac-spread*:: Spread the destination mac by this this factor. e.g 2 will generate the traffic to 2 devices DEST-MAC ,DEST-MAC+1. 
The maximum is up to 128 devices. diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index c3f489a8..6e679114 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -2683,7 +2683,7 @@ $.sudo /t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -m 20 -d 100 -l 1 .Simple HTTP 1Gb/sec with NAT translation support [source,bash] ---- -$.sudo /t-rex-64 -f cap2/simple_http.yaml -c 4 -m 100 -d 100 -l 1000 --learn +$.sudo /t-rex-64 -f cap2/simple_http.yaml -c 4 -m 100 -d 100 -l 1000 --learn-mode 1 ---- .IMIX 1G/sec ,1600 flows @@ -2712,7 +2712,7 @@ $.sudo /t-rex-64 -f cap2/imix_64.yaml -c 4 -m 1 -d 100 -l 1000 | name | description | cap2/dns.yaml | simple dns pcap file | cap2/http_simple.yaml | simple http cap file -| avl/sfr_delay_10_1g_no_bundeling.yaml | sfr traffic profile capture from Avalanche - Spirent without bundeling support with RTT=10msec ( a delay machine), this can be used with --ipv6 and --learn mode +| avl/sfr_delay_10_1g_no_bundeling.yaml | sfr traffic profile capture from Avalanche - Spirent without bundeling support with RTT=10msec ( a delay machine), this can be used with --ipv6 and --learn-mode | avl/sfr_delay_10_1g.yaml | head-end sfr traffic profile capture from Avalanche - Spirent with bundeling support with RTT=10msec ( a delay machine), it is normalized to 1Gb/sec for m=1 | avl/sfr_branch_profile_delay_10.yaml | branch sfr profile capture from Avalanche - Spirent with bundeling support with RTT=10msec it, is normalized to 1Gb/sec for m=1 | cap2/imix_fast_1g.yaml | imix profile with 1600 flows normalized to 1Gb/sec. 
-- cgit 1.2.3-korg From ec80d78c338e284bf28f315205aa7e2338385c77 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Jan 2016 10:05:13 +0200 Subject: add trim instruction for random packet size --- trex_rpc_server_spec.asciidoc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 46ff734c..2f12cc6e 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -31,6 +31,10 @@ The TRex RPC Server | 1.3 | Hanoch Haim (hhaim) | - update VM instructions +| 1.4 | Hanoch Haim (hhaim) +| +- add random trim instruction + |================= @@ -673,6 +677,13 @@ Any element in the array can be one of the following object types: | is_big_endian | boolean | should write as big endian or little |================= +.Object type 'vm - trim_pkt_size' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''trim_pkt_size'' +| name | string | flow var name to take the new trim packet size from. The var size should be valid packet size and less than template packet size. 
see `stl/udp_rand_size.yaml` for an example +|================= .Object type 'vm - tuple_flow_var' [options="header",cols="1,1,3"] -- cgit 1.2.3-korg From 429eb374c18a6d8f2edfe051f9ab73229a2deb24 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Jan 2016 04:57:12 +0200 Subject: v1.86 release notes --- release_notes.asciidoc | 7 ++ trex_book.asciidoc | 209 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 215 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 254d6bb1..ff314335 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,13 @@ ifdef::backend-docbook[] endif::backend-docbook[] + +== Release 1.86 == + +* NAT Cisco ASA support +** Add support for learning using TCP-ACK field see more here link:trex_manual.html#_nat_support[here] and link:trex_manual.html#_trex_with_asa_5585[here] +* More stateless support + == Release 1.85 == * Upgrade to DPDK 2.2.0 diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 63ab7294..d826b4a4 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -717,7 +717,11 @@ TRex(0) -| |-TRex(1) === NAT support TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. -In mode 2, this is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. In mode 2, this is done by embedding NAT info in the ACK of the first TCP SYN. +In mode 2, this is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. +In mode 1, this is done by embedding NAT info in the ACK of the first TCP SYN. +In Mode 1 there is a limitation that only UDP from inside to outside are supported. UDP with both sides, for example DNS won't be supported in this mode. 
+This mode was added to support Cisco ASA that in default drop any + *Example:*:: @@ -1390,3 +1394,206 @@ $sudo ./nvmupdate64e You might need a power cycle and to run this command a few times to get the latest firmware +=== TRex with ASA 5585 + +Running TRex aginst ASA 5585 has some limitation + +* There is a need to disable TCP sequence randomization using the command `set connection random-sequence-number disable` +* ASA can't forward ipv4 options so there is a need to use --learn-mode 1 in case of NAT. In this mode UDP with two directions are not supported +* Client side traffic goes from TenGigabitEthernet0/8 server side from TenGigabitEthernet0/9 +* Latency should be tested using ICMP with `--l-pkt-mode 2` + + +==== ASA 5585 sample configuration + +[source,bash] +---- +ciscoasa# show running-config +: Saved + +: +: Serial Number: JAD194801KX +: Hardware: ASA5585-SSP-10, 6144 MB RAM, CPU Xeon 5500 series 2000 MHz, 1 CPU (4 cores) +: +ASA Version 9.5(2) +! +hostname ciscoasa +enable password 8Ry2YjIyt7RRXU24 encrypted +passwd 2KFQnbNIdI.2KYOU encrypted +names +! +interface Management0/0 + management-only + nameif management + security-level 100 + ip address 10.56.216.106 255.255.255.0 +! +interface TenGigabitEthernet0/8 + nameif inside + security-level 100 + ip address 15.0.0.1 255.255.255.0 +! +interface TenGigabitEthernet0/9 + nameif outside + security-level 0 + ip address 40.0.0.1 255.255.255.0 +! 
+boot system disk0:/asa952-smp-k8.bin +ftp mode passive +pager lines 24 +logging asdm informational +mtu management 1500 +mtu inside 9000 +mtu outside 9000 +no failover +no monitor-interface service-module +icmp unreachable rate-limit 1 burst-size 1 +no asdm history enable +arp outside 40.0.0.2 90e2.baae.87d1 +arp inside 15.0.0.2 90e2.baae.87d0 +arp timeout 14400 +no arp permit-nonconnected +route management 0.0.0.0 0.0.0.0 10.56.216.1 1 +route inside 16.0.0.0 255.0.0.0 15.0.0.2 1 +route outside 48.0.0.0 255.0.0.0 40.0.0.2 1 +timeout xlate 3:00:00 +timeout pat-xlate 0:00:30 +timeout conn 1:00:00 half-closed 0:10:00 udp 0:02:00 sctp 0:02:00 icmp 0:00:02 +timeout sunrpc 0:10:00 h323 0:05:00 h225 1:00:00 mgcp 0:05:00 mgcp-pat 0:05:00 +timeout sip 0:30:00 sip_media 0:02:00 sip-invite 0:03:00 sip-disconnect 0:02:00 +timeout sip-provisional-media 0:02:00 uauth 0:05:00 absolute +timeout tcp-proxy-reassembly 0:01:00 +timeout floating-conn 0:00:00 +user-identity default-domain LOCAL +http server enable +http 192.168.1.0 255.255.255.0 management +no snmp-server location +no snmp-server contact +crypto ipsec security-association pmtu-aging infinite +crypto ca trustpool policy +telnet 0.0.0.0 0.0.0.0 management +telnet timeout 5 +ssh stricthostkeycheck +ssh timeout 5 +ssh key-exchange group dh-group1-sha1 +console timeout 0 +! +tls-proxy maximum-session 1000 +! +threat-detection basic-threat +threat-detection statistics access-list +no threat-detection statistics tcp-intercept +dynamic-access-policy-record DfltAccessPolicy +! +class-map icmp-class + match default-inspection-traffic +class-map inspection_default + match default-inspection-traffic +class-map no-tcp-seq-rand + match any +! +! 
+policy-map type inspect dns preset_dns_map + parameters + message-length maximum client auto + message-length maximum 512 +policy-map no-tcp-seq-rand + class no-tcp-seq-rand + set connection random-sequence-number disable #<1> +policy-map icmp_policy + class icmp-class + inspect icmp +policy-map global_policy + class inspection_default + inspect dns preset_dns_map + inspect ftp + inspect h323 h225 + inspect h323 ras + inspect rsh + inspect rtsp + inspect esmtp + inspect sqlnet + inspect skinny + inspect sunrpc + inspect xdmcp + inspect sip + inspect netbios + inspect tftp + inspect ip-options +! +service-policy global_policy global +service-policy no-tcp-seq-rand interface inside +service-policy icmp_policy interface outside +prompt hostname context +! +jumbo-frame reservation +! +no call-home reporting anonymous +: end +ciscoasa# +---- +<1> Disable TCP sequence randomization + +==== TRex command example + +Using this command we send simple HTTP traffic with NAT learn mode and delay of 1 sec at start up +We added the delay because it seems ASA drop the first packets. +Latency is configured to ICMP with learn mode and learn mode to TCP-ACK mode. 
+ +[source,bash] +---- +$sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -l 1000 --l-pkt-mode 2 -m 1000 --learn-mode 1 -k 1 +---- + +This is more realistic traffic for Enterprise (we removed from SFR the UDP traffic that has two sides as it is not supported right now) + +[source,bash] +---- +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_asa_nat.yaml -d 1000 --cfg /etc/trex_cfg4.yaml -l 1000 --l-pkt-mode 2 -m 4 --learn-verify --learn-mode 1 -k 1 +---- + +The TRex output + +[source,bash] +---- +-Per port stats table + ports | 0 | 1 + ----------------------------------------------------------------------------------------- + opackets | 106347896 | 118369678 + obytes | 33508291818 | 118433748567 + ipackets | 118378757 | 106338782 + ibytes | 118434305375 | 33507698915 + ierrors | 0 | 0 + oerrors | 0 | 0 + Tx Bw | 656.26 Mbps | 2.27 Gbps + +-Global stats enabled + Cpu Utilization : 18.4 % 31.7 Gb/core + Platform_factor : 1.0 + Total-Tx : 2.92 Gbps Nat_time_out : 9103 #<1> + Total-Rx : 2.92 Gbps Nat_no_fid : 0 + Total-PPS : 542.29 Kpps Total_nat_active: 7 + Total-CPS : 8.30 Kcps Total_nat_open : 3465246 + + Expected-PPS : 539.85 Kpps Nat_learn_errors: 0 + Expected-CPS : 8.29 Kcps + Expected-BPS : 2.90 Gbps + + Active-flows : 7860 Clients : 255 Socket-util : 0.0489 % + Open-flows : 3481234 Servers : 5375 Socket : 7860 Socket/Clients : 30.8 + drop-rate : 0.00 bps + current time : 425.1 sec + test duration : 574.9 sec + +-Latency stats enabled + Cpu Utilization : 0.3 % + if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window + | , , check, , latency(usec),latency (usec) ,(usec) , + ---------------------------------------------------------------------------------------------------------------- + 0 | 420510, 420495, 0, 1, 58 , 1555, 14 | 240 257 258 258 219 930 732 896 830 472 190 207 729 + 1 | 420496, 420509, 0, 1, 51 , 1551, 13 | 234 253 257 258 214 926 727 893 826 468 187 204 724 +---- +<1> this counter should be zero + + + -- cgit 1.2.3-korg From 
f904181221a52e6b008be977cdee51dd495e9285 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Jan 2016 05:15:01 +0200 Subject: fix the apendix --- trex_book.asciidoc | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index d826b4a4..81da9513 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -717,13 +717,18 @@ TRex(0) -| |-TRex(1) === NAT support TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. -In mode 2, this is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. -In mode 1, this is done by embedding NAT info in the ACK of the first TCP SYN. -In Mode 1 there is a limitation that only UDP from inside to outside are supported. UDP with both sides, for example DNS won't be supported in this mode. -This mode was added to support Cisco ASA that in default drop any +*mode 1:*:: -*Example:*:: +In this mode, It is done by embedding NAT info into the ACK of the first TCP SYN. +In this mode, there is a limitation that UDP templates with two directions won't be supported (e.g. DNS). +The reason for this feature is that Cisco ASA drops any packet with ipv4 option. + +*mode 2:*:: + +In this mode, it is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. + +==== Examples *simple HTTP traffic* @@ -1400,10 +1405,8 @@ Running TRex aginst ASA 5585 has some limitation * There is a need to disable TCP sequence randomization using the command `set connection random-sequence-number disable` * ASA can't forward ipv4 options so there is a need to use --learn-mode 1 in case of NAT. 
In this mode UDP with two directions are not supported -* Client side traffic goes from TenGigabitEthernet0/8 server side from TenGigabitEthernet0/9 * Latency should be tested using ICMP with `--l-pkt-mode 2` - ==== ASA 5585 sample configuration [source,bash] @@ -1534,22 +1537,27 @@ ciscoasa# ---- <1> Disable TCP sequence randomization -==== TRex command example +==== TRex commands example + +Using these commands the configuration are: + +1. NAT learn mode (TCP-ACK) +2. Delay of 1 sec at start up (-k 1) it was added because ASA drop the first packets. +3. Latency is configured to ICMP -Using this command we send simple HTTP traffic with NAT learn mode and delay of 1 sec at start up -We added the delay because it seems ASA drop the first packets. -Latency is configured to ICMP with learn mode and learn mode to TCP-ACK mode. +*Simple HTTP:*:: [source,bash] ---- $sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -l 1000 --l-pkt-mode 2 -m 1000 --learn-mode 1 -k 1 ---- -This is more realistic traffic for Enterprise (we removed from SFR the UDP traffic that has two sides as it is not supported right now) +This is more realistic traffic for Enterprise (we removed from SFR the UDP traffic templates that have two sides as it is not supported right now). 
+*Enterprise profile:*:: [source,bash] ---- -$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_asa_nat.yaml -d 1000 --cfg /etc/trex_cfg4.yaml -l 1000 --l-pkt-mode 2 -m 4 --learn-verify --learn-mode 1 -k 1 +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_asa_nat.yaml -d 1000 -l 1000 --l-pkt-mode 2 -m 4 --learn-mode 1 -k 1 ---- The TRex output -- cgit 1.2.3-korg From f9af868af6b818b34779bb68f0f829fb69ce7e57 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Jan 2016 05:25:39 +0200 Subject: add background --- images/Thumbs.db | Bin 162816 -> 441856 bytes images/bg4.jpg | Bin 0 -> 231762 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/bg4.jpg diff --git a/images/Thumbs.db b/images/Thumbs.db index fc09ef9a..f618d2b0 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/bg4.jpg b/images/bg4.jpg new file mode 100644 index 00000000..e93f834b Binary files /dev/null and b/images/bg4.jpg differ -- cgit 1.2.3-korg From 02aaeab4612b27b686be4d73da51e942833a0815 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Wed, 20 Jan 2016 09:05:45 +0200 Subject: Fixes to NAT documentation + some other small fixes --- trex_book.asciidoc | 65 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 81da9513..cfcb3c63 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -330,7 +330,7 @@ Now edit the configuration file with the right values from the previous section <3> The list of interface from `#>sudo ./dpdk_setup_ports.py -s`, in this example it was taken When working with VM, you must set the destination mac of one port as the source or the other for loopback the port in the vSwitch -and you should take the right value from the hypervisor (in case of a physical NIC you can set the mac-address with virtual you can't and you should take it from the hypervisor) +and you should take the right value from the hypervisor (in case of 
a physical NIC you can set the MAC address with virtual you can't and you should take it from the hypervisor) and example [source,python] @@ -437,15 +437,15 @@ zmq publisher at: tcp://*:4500 <13> Gb/sec generated per core of DP. Higer is better. <14> Rx and latency thread CPU utilization. -WARNING: if you don't see rx packets, revisit your mac-address configuration. +WARNING: if you don't see rx packets, revisit your MAC address configuration. ==== Running TRex for the first time with router You can follow this presentation link:trex_config_guide.html[first time TRex configuration] //TBD: Note that the link does not work correctly in PDF rendition or continue reading. -TRex set source-mac of all port to `00:00:00:01:00:00` and expected to get to this MAC-address `00:00:00:01:00:00` without a config file. -so you just need to configure router with the right mac-address. +TRex set source-mac of all port to `00:00:00:01:00:00` and expected to get to this MAC address `00:00:00:01:00:00` without a config file. +so you just need to configure router with the right MAC address. NOTE: Virtual routers on ESXi (for example, Cisco CSR1000v) must have a distinct MAC address for each port. Specify the address in the configuration file. see more xref:trex_config[here]. Another example is where the TRex is connected to a switch. In that case each of TRex port should have a distinc MAC address. @@ -562,7 +562,7 @@ In this case both ports will have the same amount of traffic. <6> TRex MAC-address destination port <7> PBR configuration rules -=== Static source MAC-address setting +=== Static source MAC address setting With this feature, TRex replaces the source MAC address with the client IP address. Note: This feature was requested by the Cisco ISG group. @@ -661,7 +661,7 @@ asr1k(config)#ipv6 route 5000::/64 3001::2 === Source MAC-address mapping using a file -Extending the source MAC-address replacment capability. +Extending the source MAC-address replacment capability. 
It is possible to have a mapping betwean IPv4->MAC using the new `--mac` CLI switch file format is YAML. @@ -684,12 +684,12 @@ $sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --ma *Limitations:*:: -. It is assumed that most of the clients has MAC-addrees. at least 90% of the IP should have a MAC-addrees mapping. +. It is assumed that most of the clients has MAC addrees. at least 90% of the IP should have a MAC addrees mapping. === Destination mac address spreadings anchor:mac_spread[] -Using this option, one can send traffic to a few destination devices. In normal mode all the packets are sent to the port destination mac-address. -to enable this option add this CLI `--mac-spread` to the command line +Using this option, one can send traffic to few destination devices. In normal mode all the packets are sent to the port destination mac-address. +to enable this option add `--mac-spread` to the command line. example: @@ -697,7 +697,7 @@ example: ---- $sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -m 1000 -c 4 -l 100 --mac-spread 2 ---- -in this case TRex will send to port destination mac and port destination mac +1 +In this case TRex will send to port destination mac and port destination mac +1 using a switch you could connect TRex to a few DUT. All the DUTs should return the traffic only to right port source address @@ -716,17 +716,20 @@ TRex(0) -| |-TRex(1) === NAT support -TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. +TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. +In order to learn the NAT translation, TRex must embed information describing the flow a packet belongs to, in the first +packet of each flow. This can be done in two different methods, depending on the chosen . *mode 1:*:: -In this mode, It is done by embedding NAT info into the ACK of the first TCP SYN. 
-In this mode, there is a limitation that UDP templates with two directions won't be supported (e.g. DNS). -The reason for this feature is that Cisco ASA drops any packet with ipv4 option. +Flow info is embedded in the ACK of the first TCP SYN. +In this mode, there is a limitation that bidirectional UDP templates (e.g. DNS) are not supported. +This mode was developed for testing NAT with firewalls (which usually can't work with mode 2). *mode 2:*:: -In this mode, it is done by adding an IPv4 option header with TRex info (8 bytes long 0x10 id) to the first packet of the flow. +Flow info is added in a special IPv4 option header (8 bytes long 0x10 id). The option is added only to the first packet in the flow. +This mode does not work with DUTs that drop packets with IP options (for example, Cisco ASA firewall). ==== Examples @@ -759,7 +762,7 @@ $sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 10000 <1> The number of translations with timeout should be zero. Usually this occurs when the router drops the flow due to NAT. <2> Translation not found. This can occur when there is large latency in the router input/output queue. <3> Active number of TRex traslation flows, should be low in the case of low RTT. -<4> A total of TRex translation. May be different from the total number of flows in case template is uni-directional (no need a translation). +<4> A total of TRex translation. May be different from the total number of flows in case template is uni-directional (and such does not need translation). *Configuration for Cisco ASR1000 Series:*:: @@ -816,17 +819,17 @@ access-list 8 permit 17.0.0.0 0.0.0.255 === Flow order/latency verification ( `--rx-check` ) -In normal mode (without this feature enabled), received traffic is not checked by software. It only counted by hardware (Intel NIC) for drop packets verification at test end of the test. The only exception is the Latency/Jitter packets. 
-This is one of the reasons that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy) -To enable this feature you should add to the command line options `--rx-check [sample]` where sample is the sample rate. -1/sample flows will be loaded to the software for verification. For 40Gb/Sec traffic you can use a sample of 1/128. Watch for Rx CPU% utilization. +In normal mode (without this feature enabled), received traffic is not checked by software. It is only counted by hardware (Intel NIC) for drop packets verification at the end of the test. The only exception is the Latency/Jitter packets. +This is one of the reasons that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). +To enable this feature, you should add `--rx-check ` to the command line options, where sample is the sample rate. +1/sample of the flows will be sent to the software for verification. For 40Gb/Sec traffic you can use a sample of 1/128. Watch for Rx CPU% utilization. -INFO : This feature changes the TTL of the sample flows to 255 and expects 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to higher value. more than one hop could happned when there are number of routers betwean TRex client side to TRex server side. +INFO : This feature changes the TTL of the sampled flows to 255 and expects to get packets with TTL 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop is possible if there are number of routers betwean TRex client side and TRex server side. -With this feature enabled: +With this feature enabled, you can verify that: -* You can verify that packets get out of DUT in order (from each flow perspective) -* There are no dropped packets. There is no need to wait for the end of the test. 
Without this feature enabled you must wait for the end of the test to be aware of dropped packets because there is always a difference between TX and Rx due RTT. To be sure there is a need to stop the traffic and wait for the Rx traffic and this happens only at the end of the test. +* Packets get out of DUT in order (from each flow perspective) +* There are no packet drops (No need to wait for the end of the test). Without this flag, you must wait for the end of the test in order to identify packet drops, because there is always a difference between TX and Rx, due to RTT. .Full example @@ -1401,10 +1404,10 @@ You might need a power cycle and to run this command a few times to get the late === TRex with ASA 5585 -Running TRex aginst ASA 5585 has some limitation +Running TRex aginst ASA 5585 has some limitations: -* There is a need to disable TCP sequence randomization using the command `set connection random-sequence-number disable` -* ASA can't forward ipv4 options so there is a need to use --learn-mode 1 in case of NAT. In this mode UDP with two directions are not supported +* There is a need to disable TCP sequence randomization in ASA, using the command `set connection random-sequence-number disable` +* ASA can't forward ipv4 options, so there is a need to use --learn-mode 1 in case of NAT. In this mode, bidirectional UDP flows are not supported. * Latency should be tested using ICMP with `--l-pkt-mode 2` ==== ASA 5585 sample configuration @@ -1539,11 +1542,11 @@ ciscoasa# ==== TRex commands example -Using these commands the configuration are: +Using these commands the configuration is: 1. NAT learn mode (TCP-ACK) -2. Delay of 1 sec at start up (-k 1) it was added because ASA drop the first packets. -3. Latency is configured to ICMP +2. Delay of 1 second at start up (-k 1). It was added because ASA drops the first packets. +3. Latency is configured to ICMP reply mode (--l-pkt-mode 2). 
*Simple HTTP:*:: @@ -1552,7 +1555,7 @@ Using these commands the configuration are: $sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -l 1000 --l-pkt-mode 2 -m 1000 --learn-mode 1 -k 1 ---- -This is more realistic traffic for Enterprise (we removed from SFR the UDP traffic templates that have two sides as it is not supported right now). +This is more realistic traffic for enterprise (we removed from SFR the bidirectional UDP traffic templates. As described above, ther are not supported in this mode). *Enterprise profile:*:: [source,bash] -- cgit 1.2.3-korg From ec9f29e96798b2946b342d79f1f9356214186120 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 24 Jan 2016 11:59:24 +0200 Subject: typo --- trex_book_basic.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index 6e679114..105a5a1d 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -3229,7 +3229,7 @@ The YAML configuration is something like this: distribution : "seq" ip_start : "58.0.0.1" ip_end : "58.0.1.255" - dual_port_mask : "1.0.0.0 + dual_port_mask : "1.0.0.0" cap_info : - name: avl/delay_10_http_get_0.pcap cps : 404.52 -- cgit 1.2.3-korg From 6288f8988c0495969426dbb60a08ac82612acd0a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 27 Jan 2016 10:20:53 +0200 Subject: v1.87 --- images/icons/Thumbs.db | Bin 27136 -> 27136 bytes release_notes.asciidoc | 5 +++++ 2 files changed, 5 insertions(+) diff --git a/images/icons/Thumbs.db b/images/icons/Thumbs.db index 97d755f2..2c3b4d0e 100755 Binary files a/images/icons/Thumbs.db and b/images/icons/Thumbs.db differ diff --git a/release_notes.asciidoc b/release_notes.asciidoc index ff314335..c66f0e27 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,11 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.87 == + +* Fix some 82599 ierror in case of high rate +* First Stateless API examples under api folder (not part of 
the package) + == Release 1.86 == -- cgit 1.2.3-korg From a8977423c118c0f65c557bcbd53c528061bbfbb5 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 27 Jan 2016 10:39:40 +0200 Subject: v1.88 --- release_notes.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index c66f0e27..c56929bd 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,12 @@ ifdef::backend-docbook[] endif::backend-docbook[] + +== Release 1.88 == + +* Add the Python API to the package +* Remove mock support + == Release 1.87 == * Fix some 82599 ierror in case of high rate -- cgit 1.2.3-korg From 6c5af947348c30a70c8257043f43f81c5d0e8a43 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Wed, 27 Jan 2016 10:44:24 +0200 Subject: OS install graphics troubleshooting --- trex_book.asciidoc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index cfcb3c63..d3174fcd 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1607,4 +1607,11 @@ The TRex output <1> this counter should be zero +=== Troubleshoot installation, FAQ + +Q: During OS installation, screen is skewed / error "Out of range" / resolution not supported etc. 
+ +A: + +* Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode +* Ubuntu - try Ubuntu server, which has textual installation -- cgit 1.2.3-korg From ce8e0daf180ec7ca8da0f2ad91c7b723e371fd7e Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Tue, 9 Feb 2016 11:36:20 +0200 Subject: TRex update info on vm_manual --- trex_book.asciidoc | 2 ++ trex_vm_manual.asciidoc | 3 +++ 2 files changed, 5 insertions(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index d3174fcd..0de136d0 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -258,6 +258,8 @@ To obtain a specific version, do the following: $wget --no-cache $WEB_URL/release/vX.XX.tar.gz #<1> ---- +<1> X.XX = The version number + === Running TRex for the first time in loopback If you have 10Gb/sec TRex (based on Intel 520-D2 NICs) you can verify that it works correctly by loopback the ports. diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index ce7e570c..e01d770e 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -262,6 +262,9 @@ listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes [NOTE] See http://trex-tgn.cisco.com/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. 
+=== Updating TRex + +See http://trex-tgn.cisco.com/trex/doc/trex_manual.html#_obtaining_the_trex_package[Related manual] section === TRex Live monitoring -- cgit 1.2.3-korg From 97301c4b01f3c7d253058c743779d0387c22bf5d Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 9 Feb 2016 13:36:00 +0200 Subject: v1.90 --- release_notes.asciidoc | 35 +++++++++++++++++++++++++++++++++++ trex_book.asciidoc | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index c56929bd..3a4723ca 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,41 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.90 == + +* Missing file in the pkg + +== Release 1.89 == + +* Integrate Scapy as a packet builder see `stl/profiles` folder +* Improve Python API, samples can be seen link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/examples[here] +* Add Stateless simulator into the package + +Example how to run +[source,bash] +---- +./stl-sim -f stl/profiles/udp_1pkt.py -l 10 -o a.pcap #<1> +./stl-sim -f stl/profiles/udp_1pkt_tuple_gen.py -l 20 -o a.pcap #<2> +./stl-sim -f stl/profiles/imix.py -l 100 -o a.pcap --json #<3> +---- +<1> Limit the number of packets to 10 +<2> Tuple generator example +<3> imix + +The simulator takes Stateless profile,YAML or Py and output pcap file or json + +* Console can load the new Python profile + +[source,bash] +---- +TRex > start -f stl/profiles/udp_1pkt.py -a -m 1mbps +---- + +* Basic Python HLTAPI support + +=== fix issues: === + +* Dependent streams (e.g. 
`stl/burst_1000_pkt.yaml`) can be loaded == Release 1.88 == diff --git a/trex_book.asciidoc b/trex_book.asciidoc index d3174fcd..8d94b175 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -2,7 +2,7 @@ TRex ==== :author: hhaim :email: -:revnumber: 1.77-0.0 +:revnumber: 1.88 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex -- cgit 1.2.3-korg From d8f48b2b3c3b1808dbf1fa2f22ed09085f694e9c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 11 Feb 2016 10:28:17 +0200 Subject: v1.91 --- release_notes.asciidoc | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 3a4723ca..0b76e897 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,47 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.91 == + +* Convert Stateless traffic profile to Scapy see `stl/*.py` sample folder +* Add HLTAPI tests and profile `stl/hlt/*.py` +* Fix simulator path issue fix +* The Stateless python library is not self-contained. in `automation/trex_control_plane/stl/` library is `automation/trex_control_plane/stl/trex_stl_lib/` (import trex_stl_lib) + +How to run the simulator +[source,bash] +---- +./stl-sim -f stl/udp_1pkt_range_clients_split.py -o b.pcap -l 100 -c 2 +---- + +* Add push command to convert pcap to streams + +------------------- +TRex > push --help +usage: push [-h] -f FILE [--port PORTS [PORTS ...] | -a] [-d TIME] + [-i IPG_USEC] [-s SPEEDUP] [--force] + +optional arguments: + -h, --help show this help message and exit + -f FILE File path to load + --port PORTS [PORTS ...] + A list of ports on which to apply the command + -a Set this flag to apply the command on all available + ports + -d TIME Set duration time for job. + -i IPG_USEC, --ipg IPG_USEC + IPG value in usec between packets. default will be + from the pcap + -s SPEEDUP, --speedup SPEEDUP + Factor to accelerate the injection. 
effectively means + IPG = IPG / SPEEDUP + --force Set if you want to stop active ports before appyling + command. +TRex >push -f cap2/dns.pcap --port 0 -i 10 +------------------- + + + == Release 1.90 == * Missing file in the pkg -- cgit 1.2.3-korg From 30022fdbaf321e34f21dff275012123f799ce6a4 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 11 Feb 2016 12:13:47 +0200 Subject: relation between ports, vSwitch limitation note --- trex_book.asciidoc | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 0de136d0..be1d2b48 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -439,7 +439,18 @@ zmq publisher at: tcp://*:4500 <13> Gb/sec generated per core of DP. Higer is better. <14> Rx and latency thread CPU utilization. -WARNING: if you don't see rx packets, revisit your MAC address configuration. +WARNING: If you don't see rx packets, revisit your MAC address configuration. + +==== Running TRex for the first time with virtual NICs + vSwitch + +[NOTE] +===================================================================== +Current limitation: following command will not work as excepted: +.... +sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 +.... +vSwitch can't know where to "route" the packet, it supposed to be fixed once TRex supports ARP +===================================================================== ==== Running TRex for the first time with router @@ -1609,7 +1620,7 @@ The TRex output <1> this counter should be zero -=== Troubleshoot installation, FAQ +=== Troubleshoot common problems, FAQ Q: During OS installation, screen is skewed / error "Out of range" / resolution not supported etc. 
+ A: @@ -1617,3 +1628,29 @@ A: * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode * Ubuntu - try Ubuntu server, which has textual installation +Q: How to determine relation between TRex ports and Router ports + +A: Run the TRex with following command and check incoming packet on router interfaces: + +sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 + +Q: How to determine relation between Virtual OS ports and Hypervisor ports + +A: Compare the MACs address + name of interface, for example: + +* > ifconfig + +*eth0* Link encap:Ethernet *HWaddr 00:0c:29:2a:99:b2* + + ... + +* > sudo ./dpdk_setup_ports.py -s + +*03:00.0* 'VMXNET3 Ethernet Controller' *if=eth0* drv=vmxnet3 unused=igb_uio + +[NOTE] +===================================== +If at TRex side the NICs are not visible to ifconfig, run: + +.... +sudo ./dpdk_nic_bind.py -b <1> <2> +.... + +<1> driver name - vmxnet3 for VMXNET3 and e1000 for E1000 +<2> 03:00.0 for example + +We are planning to add MACs to `./dpdk_setup_ports.py -s` +===================================== -- cgit 1.2.3-korg From 8fb741f253baa267386a086cb12217737d21251c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 14 Feb 2016 13:58:45 +0200 Subject: fix mcast addr --- trex_config.asciidoc | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/trex_config.asciidoc b/trex_config.asciidoc index 06bf3afc..abe9b644 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -122,10 +122,10 @@ image::images/TrexConfig.png[title="TRex/Router setup"] ---- - port_limit : 2 port_info : # set eh mac addr - - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] <1> - src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] <2> - - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] <3> - src_mac : [0x4,0x0,0x0,0x4,0x0,0x00] <4> + - dest_mac : [0x0,0x0,0x0,0x1,0x0,0x00] <1> + src_mac : [0x0,0x0,0x0,0x2,0x0,0x00] <2> + - dest_mac : [0x0,0x0,0x0,0x3,0x0,0x00] <3> + src_mac : [0x0,0x0,0x0,0x4,0x0,0x00] <4> ---- 
<1> Correspond to TRex port 0 - should be Router TenG 0/0/0 mac-address <2> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees @@ -144,7 +144,7 @@ image::images/TrexConfig.png[title="TRex/Router setup"] [source,python] ---- interface TenGigabitEthernet0/0/0 - mac-address 0100.0001.0000 <1> + mac-address 0000.0001.0000 <1> mtu 4000 <2> ip address 11.11.11.11 255.255.255.0 <3> ip policy route-map p1_to_p2 <4> @@ -152,7 +152,7 @@ interface TenGigabitEthernet0/0/0 ! interface TenGigabitEthernet0/0/1 - mac-address 0300.0003.0000 <5> + mac-address 0000.0003.0000 <5> mtu 4000 ip address 12.11.11.11 255.255.255.0 ip policy route-map p2_to_p1 @@ -189,8 +189,8 @@ route-map p2_to_p1 permit 10 [source,python] ---- - arp 11.11.11.12 0200.0002.0000 ARPA <1> - arp 12.11.11.12 0400.0004.0000 ARPA <2> + arp 11.11.11.12 0000.0002.0000 ARPA <1> + arp 12.11.11.12 0000.0004.0000 ARPA <2> ---- <1> Destination mac-addrees of packets sent from If 0/0/0 is matched to TRex source mac-address port-0 <2> Destination mac-addrees of packets sent from If 0/0/1 is matched to TRex source mac-address port-1 @@ -221,13 +221,13 @@ generator : [source,python] ---- interface TenGigabitEthernet0/0/0 - mac-address 0100.0001.0000 + mac-address 0000.0001.0000 mtu 4000 ip address 11.11.11.11 255.255.255.0 ! ` interface TenGigabitEthernet0/0/1 - mac-address 0300.0003.0000 + mac-address 0000.0003.0000 mtu 4000 ip address 22.11.11.11 255.255.255.0 ! @@ -252,6 +252,12 @@ $./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 1 $./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 2 ........................................... +* In case you are connected to a Switch you must send packet from both direction first + +........................................... +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 +........................................... 
+ == Static-route configuration - IPV6 -- cgit 1.2.3-korg From 9f59b2e847f82714fabf9b60fc1a224cdc9fa213 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 14 Feb 2016 14:00:56 +0200 Subject: fix mcast addr1 --- trex_config.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/trex_config.asciidoc b/trex_config.asciidoc index abe9b644..285734d1 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -105,10 +105,10 @@ html, body { * TRex is directly connected to ASR1K ports. image::images/TrexConfig.png[title="TRex/Router setup"] -. TRex port 0 - clients side +. TRex port 0 - Client side . Router TenG 0/0/0 . Router TenG 0/0/1 -. TRex port 1 - servers side +. TRex port 1 - Server side == TRex configuration @@ -129,7 +129,7 @@ image::images/TrexConfig.png[title="TRex/Router setup"] ---- <1> Correspond to TRex port 0 - should be Router TenG 0/0/0 mac-address <2> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees -<3> Correspond to TRex port 0 - should be Router TenG 0/0/1 mac-address +<3> Correspond to TRex port 1 - should be Router TenG 0/0/1 mac-address <4> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees -- cgit 1.2.3-korg From 0bf2968992104b1a517d16df7187b30a37946258 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Feb 2016 09:19:39 +0200 Subject: v1.92 --- release_notes.asciidoc | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 0b76e897..4b50b96e 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,56 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.92 == + +** Stream can set static Source/Destination MAC-Address as oppose to the default (TRex port from /etc/trex_config.yaml) + +[source,python] +---- +def create_stream (self): + base_pkt = Ether(src="00:00:dd:dd:00:01")/IP()/UDP() + pad = max(0, size - 
len(base_pkt)) * 'x' +---- + +** Stream support action_count. Loop of streams can end after action_count number. The push command uses this new feature to import pcap to streams and stop after x iteration. + +[source,python] +---- +STLStream( self_start = False, + name ='S2', + packet = STLPktBuilder(pkt = base_pkt2/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 3 ), + action_count = 2, # loop 2 times + next = 'S0' + ) +---- + +** Support new Field-Engine instructions (variable with step and write with mask). See new sample folder and specification for more info + +[source,python] +---- +def create_stream (self): + + # 2 MPLS label the internal with s=1 (last one) + pkt = Ether()/MPLS(label=17,cos=1,s=0,ttl=255)/MPLS(label=0,cos=1,s=1,ttl=12)/IP()/UDP()/('x'*20) + + vm = CTRexScRaw( [ STLVmFlowVar(name="mlabel", min_value=1, max_value=2000, size=2, op="inc"), + STLVmWrMaskFlowVar(fv_name="mlabel", pkt_offset= "MPLS:1.label",pkt_cast_size=4, mask=0xFFFFF000,shift=12) # write mask + ] + ) +---- + +[source,python] +---- + vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", min_value=1, max_value=30, size=1, op="dec",step=7), # step + STLVmWrFlowVar(fv_name="mac_src", pkt_offset= 11) + ] + ) +---- + +** More profile samples (native/hlt) + + == Release 1.91 == * Convert Stateless traffic profile to Scapy see `stl/*.py` sample folder -- cgit 1.2.3-korg From a3c5f9da5c1e67e2171c7138c650e4cd4323498b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 23 Feb 2016 09:08:46 +0200 Subject: update the spec --- trex_rpc_server_spec.asciidoc | 104 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 97 insertions(+), 7 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 2f12cc6e..cc2dadf1 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -34,6 +34,9 @@ The TRex RPC Server | 1.4 | Hanoch Haim (hhaim) | - add random trim instruction +| 1.5 | Hanoch Haim (hhaim) +| +- add more instructions (v1.92) 
|================= @@ -578,6 +581,8 @@ The format of that object is as follows: | Field | Type | Description | enabled | boolean | is this stream enabled | self_start | boolean | is this stream triggered by starting injection or triggered by another stream +| action_count | uint16_t | In case it is bigger than zero and next stream is not -1 (set) the number of goto will be limited to this number. Maximum value is 65K. default is zero. Zero means - not limit. +| flags | uint16_t | bit 0 (LSB) : 1 - take the src MAC from the packet instead of config file. bit 1-2 (LSB) how to set the dest MAC ( stCFG_FILE = 0, stPKT = 1,stARP = 2 ) | isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started | next_stream_id | int | next stream to start after this stream. -1 means stop after this stream | packet | object | object of type xref:packet_obj['packet'] @@ -600,20 +605,31 @@ packet contains binary and meta data ===== Object type 'mode' anchor:mode_obj[] mode object can be 'one' of the following objects: +.Object type 'rate' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | [''pps'',''bps_L1'',''bps_L2'',''percentage'' +| value | double | rate +|================= + + .Object type 'mode - continuous' [options="header",cols="1,1,3"] |================= | Field | Type | Description | type | string | ''continuous'' -| pps | double | rate in packets per second +| rate | object | rate object |================= + + .Object type 'mode - single_burst' [options="header",cols="1,1,3"] |================= | Field | Type | Description | type | string | ''single_burst'' -| pps | double | rate in packets per second +| rate | object | rate object | total pkts | int | total packets in the burst |================= @@ -622,7 +638,7 @@ mode object can be 'one' of the following objects: |================= | Field | Type | Description | type | string | ''multi_burst'' -| pps | int | rate in packets per second +| rate 
| object | rate object | pkts_per_burst | int | packets in a single burst | ibg | double | ['usec'] inter burst gap. delay between bursts in usec | count | int | number of bursts. ''0'' means loop forever, ''1'' will fall back to single burst @@ -664,6 +680,7 @@ Any element in the array can be one of the following object types: | init_value | uint64_t as string | init value for the field | min_value | uint64_t as string | minimum value for the field | max_value | uint64_t as string | maximum value for the field +| step | uint64_t as string | step, how much to inc or dec. 1 is the default (in case of 'random' this field is not used) |================= .Object type 'vm - write_flow_var' @@ -730,6 +747,66 @@ an example of tuple_flow_var variable The variable name.port and name.ip could be written to any offset in the packet (usualy to src_ip and src_port as client) +.Object type 'vm - write_mask_flow_var' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''write_mask_flow_var''' +| name | string | flow variable name +| pkt_offset | uint16_t as string | offset at the packet to perform the write +| add_value | int32_t as string | delta to add to the field prior to writing - can be negative +| pkt_cast_size | uint_t as string | size in bytes only 1,2,4 are valid +| mask | uint32_t as string | 1 means care e.g. 0xff will write to only 8 LSB bits +| shift | int8_t as string | Positive will shift left (multiply by x2) negative will shift right (divided by 2) e.g. 
1 will multiply by 2 +| is_big_endian | boolean | should write as big endian or little +|================= + +.Pseudocode +[source,bash] +---- + uint32_t val=(cast_to_size)rd_from_varible("name"); # read flow-var + val+=m_add_value; # add value + + if (m_shift>0) { # shift + val=val<>(-m_shift); + } + } + + pkt_val=rd_from_pkt(pkt_offset) # RMW + pkt_val = (pkt_val & ~m_mask) | (val & m_mask) + wr_to_pkt(pkt_offset,pkt_val) +---- + +an example of tuple_flow_var variable + +[source,bash] +---- + name = "a" (varible 2 byte start 1-10 inc ) + pkt_cast_size = 1 ( cast to uint8_t ) + add_value = 0 + mask = 0xf0 + shift = 4 + is_big_endian =1 +---- + +.Results +[options="header",cols="1,1,3"] +|================= +| var "a" | PKT- before write | PKT post write +| 1 | 0x03 | 0x13 +| 2 | 0x03 | 0x23 +| 3 | 0x03 | 0x33 +| 4 | 0x03 | 0x43 +| 5 | 0x03 | 0x53 +|================= + +The use cases of this instruction is to write to a bit field (valn/mpls) + + + TIP: For more information and examples on VM objects please refer to: link:vm_doc.html[VM examples] @@ -788,7 +865,11 @@ This could be stream_id different from the stream object which contains the rx_s "enabled": true, "isg": 4.3, "mode": { - "pps": 3, + "rate": { + "type": "pps", + "value": 10 + }, + "total_pkts": 5000, "type": "single_burst" }, @@ -1420,7 +1501,10 @@ On the following example, there's no VM instructions, rx_stats option is disable "enabled" : true, "isg" : 0, "mode" : { - "pps" : 100, + "rate": { + "type": "pps", + "value": 100 + }, "type" : "continuous" }, "next_stream_id" : -1, @@ -1497,7 +1581,10 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s "enabled" : true, "isg" : 0, "mode" : { - "pps" : 100, + "rate": { + "type": "pps", + "value": 100 + }, "type" : "continuous" }, "next_stream_id" : 1, @@ -1536,7 +1623,10 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s "enabled" : true, "isg" : 0, "mode" : { - "pps" : 200, + "rate": { + 
"type": "pps", + "value": 100 + }, "type" : "continuous" }, "next_stream_id" : -1, -- cgit 1.2.3-korg From 397f85ea15b2bed7202ad3e36cb6346a9552fd6a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 24 Feb 2016 08:32:58 +0200 Subject: add random_seed --- trex_rpc_server_spec.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index cc2dadf1..716193fc 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -582,6 +582,7 @@ The format of that object is as follows: | enabled | boolean | is this stream enabled | self_start | boolean | is this stream triggered by starting injection or triggered by another stream | action_count | uint16_t | In case it is bigger than zero and next stream is not -1 (set) the number of goto will be limited to this number. Maximum value is 65K. default is zero. Zero means - not limit. +| random_seed | uint32_t | For creating reproducible tests with random number, each stream can get a seed. this field is optional. In case of zero the seed value won't be taken | flags | uint16_t | bit 0 (LSB) : 1 - take the src MAC from the packet instead of config file. bit 1-2 (LSB) how to set the dest MAC ( stCFG_FILE = 0, stPKT = 1,stARP = 2 ) | isg | double | ['usec'] inter stream gap - delay time in usec until the stream is started | next_stream_id | int | next stream to start after this stream. -1 means stop after this stream -- cgit 1.2.3-korg From 1b68b2c49731563b9a1d33c08b8f97399e073808 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 25 Feb 2016 11:59:43 +0200 Subject: v1.93 --- release_notes.asciidoc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 4b50b96e..1d640f23 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,26 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.93 == + +* Support port attribute API and Console command. 
See `$portattr -a --prom` +* Support random seed per Stream attribute - see specification for more info +* Add more sample/profiles (stl/hlt) from real use cases +* Enhance Field Engine with new instructions +* TUI now shows L1 and L2 bandwidth. Console support L1 and L2 and %% +* Stream rate can be configured with PPS/bps_L1/bps_L2/port_percentage +* Update Stateless JSON-RPC specification +* HLT fixes and support split_by variable + +=== fix issues: === + +* Fix some typo in Python API stl/example folder +* Fix Field Engine IPv4 checksum issue with big packet size +* Fix Field Engine issue with random variables +* Fix `streams -a` crash +* Fix X710 issue. Now return speed of 10gb instead of 40gb for Stateless port speed + + == Release 1.92 == ** Stream can set static Source/Destination MAC-Address as oppose to the default (TRex port from /etc/trex_config.yaml) -- cgit 1.2.3-korg From 6684fac647f5322caa1a94c849cc85ee2d77b2f8 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 26 Feb 2016 05:04:07 +0200 Subject: typo fix --- release_notes.asciidoc | 1 + trex_book.asciidoc | 4 +- trex_config.asciidoc | 2 +- trex_control_plane_design_phase1.asciidoc | 240 ++++++++-------- trex_control_plane_peek.asciidoc | 450 +++++++++++++++--------------- trex_vm_manual.asciidoc | 4 +- 6 files changed, 351 insertions(+), 350 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 1d640f23..f052a72e 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -30,6 +30,7 @@ endif::backend-docbook[] * Stream rate can be configured with PPS/bps_L1/bps_L2/port_percentage * Update Stateless JSON-RPC specification * HLT fixes and support split_by variable +* First phase of per stream rx/tx statistic - XL710/X710 hardware support === fix issues: === diff --git a/trex_book.asciidoc b/trex_book.asciidoc index a037416c..f0ef132b 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -364,7 +364,7 @@ If successful, the output will be similar to the 
following: [source,python] ---- $ sudo ./t-rex-64 -f cap2/dns.yaml -d 100 -l 1000 -Starting T-Rex 1.50 please wait ... +Starting TRex 1.50 please wait ... zmq publisher at: tcp://*:4500 number of ports founded : 4 port : 0 @@ -668,7 +668,7 @@ asr1k(config)#ipv6 route 5000::/64 3001::2 <1> enable ipv6 <2> add pbr <3> enable ipv6 routing -<4> mac-addr setting should be like T-Rex +<4> mac-addr setting should be like TRex <5> PBR configuraion diff --git a/trex_config.asciidoc b/trex_config.asciidoc index 285734d1..88848c15 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -295,7 +295,7 @@ csi-mcp-asr1k-40(config)#ipv6 route 5000::/64 3001::2 <2> Add ipv6 addrees <3> Add pbr <4> Enable ipv6 routing -<5> Mac-addr setting should be like T-Rex +<5> Mac-addr setting should be like TRex <6> PBR configuraion diff --git a/trex_control_plane_design_phase1.asciidoc b/trex_control_plane_design_phase1.asciidoc index cdbd9def..663a51cc 100755 --- a/trex_control_plane_design_phase1.asciidoc +++ b/trex_control_plane_design_phase1.asciidoc @@ -1,4 +1,4 @@ -T-Rex Control Plane Design - Phase 1 +TRex Control Plane Design - Phase 1 ==================================== :author: Dan Klein :email: @@ -9,30 +9,30 @@ T-Rex Control Plane Design - Phase 1 == Introduction -=== T-Rex traffic generator +=== TRex traffic generator -T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. +TRex traffic generator is a tool design the benchmark platforms with realistic traffic. This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. -=== T-Rex Control Plane +=== TRex Control Plane -T-Rex control (phase 1) is the base API, based on which any future API will be developed. + -This document will describe the current control plane for T-Rex, and its scalable features as a directive for future developement. 
+TRex control (phase 1) is the base API, based on which any future API will be developed. + +This document will describe the current control plane for TRex, and its scalable features as a directive for future developement. -==== T-Rex Control Plane - Architecture and Deployment notes +==== TRex Control Plane - Architecture and Deployment notes -T-Rex control plane is based on a JSON RPC transactions between clients and server. + -Each T-Rex machine will have a server running on it, closely interacting with T-Rex (clients do not approach T-Rex directly). + -The server version (which runs as either a daemon or a CLI application) is deployed with T-Rex latest version, written in Python 2.7. +TRex control plane is based on a JSON RPC transactions between clients and server. + +Each TRex machine will have a server running on it, closely interacting with TRex (clients do not approach TRex directly). + +The server version (which runs as either a daemon or a CLI application) is deployed with TRex latest version, written in Python 2.7. As future feature, and as multiple T-Rexes might run on the same machine, single server shall serve all T-Rexes running a machine. -The control plane implementation is using the currently dumped data messaging from T-Rex's core via ZMQ publisher, running from core #1. +The control plane implementation is using the currently dumped data messaging from TRex's core via ZMQ publisher, running from core #1. The server used as a Subscriptor for this data, manipulating the packets, and re-encodes it into JSON-RPC format for clients use. + Since the entire process is taken place internally on the machine itself (using TCP connection with `localhost`), very little overhead is generated from outer network perspective. <<< -The following image describes the general architecture of the control plane and how it interacts with the data plane of T-Rex. 
+The following image describes the general architecture of the control plane and how it interacts with the data plane of TRex. ifdef::backend-docbook[] image::images/trex_control_plane_modules.png[title="Control Plane modules",align="center",width=450, link="images/trex_control_plane_modules.png"] @@ -42,7 +42,7 @@ ifdef::backend-xhtml11[] image::images/trex_control_plane_modules.png[title="Control Plane modules",align="center",width=900, link="images/trex_control_plane_modules.png"] endif::backend-xhtml11[] -The Python test script block represents any automation code or external module that wishes to control T-Rex by interacting with its server. +The Python test script block represents any automation code or external module that wishes to control TRex by interacting with its server. Such script can use other JSON-RPC based implementations of this CTRexClient module, as long as it corresponds with the known server methods and JSON-RPC protocol. @@ -51,14 +51,14 @@ At next phases, an under developement integrated module will serve the clients, == Using the API [NOTE] -Basic familiarity with T-Rex is recommended before using this tool. + -Further information can be learned from T-Rex manual: http://csi-wiki-01:8080/display/bpsim/Documentation[(T-Rex manual)] +Basic familiarity with TRex is recommended before using this tool. + +Further information can be learned from TRex manual: http://csi-wiki-01:8080/display/bpsim/Documentation[(TRex manual)] === The Server module -The server module is responsible for handling all possible requests related to T-Rex (i.e. this is the only mechanism that interacts with remote clients). + -The server is built as a multithreaded application, and **must be launched on a T-Rex commands using `sudo` permissions**. +The server module is responsible for handling all possible requests related to TRex (i.e. this is the only mechanism that interacts with remote clients). 
+ +The server is built as a multithreaded application, and **must be launched on a TRex commands using `sudo` permissions**. The server application can run in one of two states: @@ -69,9 +69,9 @@ The server application can run in one of two states: ==== Launching the server -The server would run only on valid T-Rex machines or VM, due to delicate customization in used sub-modules, designed to eliminate the situation in which control and data plane packets are mixed. +The server would run only on valid TRex machines or VM, due to delicate customization in used sub-modules, designed to eliminate the situation in which control and data plane packets are mixed. -The server code is deployed by default with T-Rex (starting version 1.63 ) and can be launched from its path using the following command: + +The server code is deployed by default with TRex (starting version 1.63 ) and can be launched from its path using the following command: + `./trex_daemon_server [RUN_COMMAND] [options]` [NOTE] @@ -83,7 +83,7 @@ Running this command with `--help` option will prompt the help menu, explaning a The following daemon commands are supported: - 1. **`start`**: This option starts the daemon application of T-Rex server, using the following command options (detailed exmplanation on this next time). + 1. **`start`**: This option starts the daemon application of TRex server, using the following command options (detailed exmplanation on this next time). 2. **`stop`**: Stop the daemon application. @@ -104,7 +104,7 @@ Let's have a look on the help menu: NOTE: start/stop/restart options only available when running in daemon mode -Run server application for T-Rex traffic generator +Run server application for TRex traffic generator optional arguments: -h, --help show this help message and exit @@ -112,10 +112,10 @@ optional arguments: Select port on which the daemon runs. Default port is 8090. -z PORT, --zmq-port PORT - Select port on which the ZMQ module listens to T-Rex. 
+ Select port on which the ZMQ module listens to TRex. Default port is 4500. #<2> -t PATH, --trex-path PATH - Specify the compiled T-Rex directory from which T-Rex + Specify the compiled TRex directory from which TRex would run. Default path is: / #<1> [root@trex-dan Server]# @@ -130,10 +130,10 @@ The available options are: 1. **`-p, --daemon-port`**: set the port on which the server is listening to clients requests. + Default listening server port is **`8090`**. - 2. **`-z, --zmq-port`**: set the port on which the server is listening to zmq publication from T-Rex. + + 2. **`-z, --zmq-port`**: set the port on which the server is listening to zmq publication from TRex. + Default listening server port is **`4500`**. - 3. **`-t, --trex-path`**: set the path from which T-Rex is runned. This is especially helpful when more than one version of T-Rex is used or switched between. Although this field has default value, it is highly recommended to set it manually with each server launch. + 3. **`-t, --trex-path`**: set the path from which TRex is runned. This is especially helpful when more than one version of TRex is used or switched between. Although this field has default value, it is highly recommended to set it manually with each server launch. [NOTE] When server is launched is first makes sure the trex-path is valid: the path 'exists' and granted with 'execution permissions.' If any of the conditions is not valid, the server will not launch. @@ -142,17 +142,17 @@ When server is launched is first makes sure the trex-path is valid: the path 'ex === The Client module The client is a Python based application that created `TRexClient` instances. + -Using class methods, the client interacts with T-Rex server, and enable it to perform the following commands: +Using class methods, the client interacts with TRex server, and enable it to perform the following commands: - 1. Start T-Rex run (custom parameters supported). + 1. Start TRex run (custom parameters supported). - 2. 
Stop T-Rex run. + 2. Stop TRex run. - 3. Check what is the T-Rex status (possible states: `Idle, Starting, Running`). + 3. Check what is the TRex status (possible states: `Idle, Starting, Running`). - 4. Poll (by customize sampling) the server and get live results from T-Rex **while still running**. + 4. Poll (by customize sampling) the server and get live results from TRex **while still running**. - 5. Get custom T-Rex stats based on a window of saved history of latest 'N' polling results. + 5. Get custom TRex stats based on a window of saved history of latest 'N' polling results. The clients is also based on Python 2.7, however unlike the server, it can run on any machine who wishes to. + In fact, the client side is simply a python library that interact with the server using JSON-RPC (v2), hence if needed, anyone can write a library on any other language that will interact with the server ins the very same way. @@ -160,21 +160,21 @@ In fact, the client side is simply a python library that interact with the serve ==== `CTRexClient` module initialization -As explained, `CTRexClient` is the main module to use when writing an T-Rex test-plan. + -This module holds the entire interaction with T-Rex server, and result containing via `result_obj`, which is an instance of `CTRexResult` class. + +As explained, `CTRexClient` is the main module to use when writing an TRex test-plan. + +This module holds the entire interaction with TRex server, and result containing via `result_obj`, which is an instance of `CTRexResult` class. + The `CTRexClient` instance is initialized in the following way: - 1. **T-Rex hostname**: represents the hostname on which the server is listening. Either hostname or IPv4 address will be a valid input. + 1. **TRex hostname**: represents the hostname on which the server is listening. Either hostname or IPv4 address will be a valid input. 2. **Server port**: the port on which the server listens to incoming client requests. 
This parameter value must be identical to `port` option configured in the server. - 3. **History size**: The number of saved T-Rex samples. Based on this "window", some extra statistics and data are calculated. Default history size is 100 samples. + 3. **History size**: The number of saved TRex samples. Based on this "window", some extra statistics and data are calculated. Default history size is 100 samples. - 4. **verbose **: This boolean option will prompt extended output, if available, of each of the activated methods. For any method that interacts with T-Rex server, this will prompt the JSON-RPC request and response. + + 4. **verbose **: This boolean option will prompt extended output, if available, of each of the activated methods. For any method that interacts with TRex server, this will prompt the JSON-RPC request and response. + This option is especially useful for developers who wishes to imitate the functionality of this client using other programming languages. **That's it!** + -Once these parameter has been passed, you're ready to interact with T-Rex. +Once these parameter has been passed, you're ready to interact with TRex. [NOTE] The most common initialization will simply use the hostname, such that common initilization lookes like: + @@ -185,61 +185,61 @@ The most common initialization will simply use the hostname, such that common in This section covers with great detail the usage of the client module. Each of the methods describes are class methods of `CTRexClient`. - **`start_trex (f, d, block_to_success, timeout, trex_cmd_options)`** + - Issue a request to start T-Rex with certain configuration. The server will only handle the request if the T-Rex is in `Idle` status. + - Once the status has been confirmed, T-Rex server will issue for this single client a token, so that only that client may abort running T-Rex session. + - `f` and `d` parameters are mandatory, as they are crucial parameter in setting T-Rex behaviour. 
Also, `d` parameter must be at least 30 seconds or larger. - By default (and by design) this method **blocks** until T-Rex status changes to either 'Running' or back to 'Idle'. + Issue a request to start TRex with certain configuration. The server will only handle the request if the TRex is in `Idle` status. + + Once the status has been confirmed, TRex server will issue for this single client a token, so that only that client may abort running TRex session. + + `f` and `d` parameters are mandatory, as they are crucial parameter in setting TRex behaviour. Also, `d` parameter must be at least 30 seconds or larger. + By default (and by design) this method **blocks** until TRex status changes to either 'Running' or back to 'Idle'. - **`stop_trex()`** + If (and only if) a certain client issued a run requested (and it accepted), this client may use this command to abort current run. + - This option is very useful especially when the real-time data from the T-Rex are utilized. + This option is very useful especially when the real-time data from the TRex are utilized. - **`wait_until_kickoff_finish(timeout = 40)`** + - This method blocks until T-Rex status changes to 'Running'. In case of error an exception will be thrown. + + This method blocks until TRex status changes to 'Running'. In case of error an exception will be thrown. + The 'timeout' parameter sets the maximum waiting time. + This method is especially useful when `block_to_success` was set to false in order to utilize the time to configure other things, such as DUT. - **`is_running(dump_out = False)`** + - Checks if there's currently T-Rex session up (with any client). + - If T-Rex is running, this method returns `True` and the result object id updated accordingly. + + Checks if there's currently TRex session up (with any client). + + If TRex is running, this method returns `True` and the result object id updated accordingly. + If not running, return `False`. 
+ If a dictionary pointer is given in `dump_out` argument, the pointer object is cleared and the latest dump stored in it. - **`get_running_status()`** + - Fetches the current T-Rex status. + + Fetches the current TRex status. + Three possible states - * `Idle` - No T-Rex session is currently running. + * `Idle` - No TRex session is currently running. - * `Starting` - A T-Rex session just started (turns into Running after stability condition is reached) + * `Starting` - A TRex session just started (turns into Running after stability condition is reached) - * `Running` - T-Rex session is currently active. + * `Running` - TRex session is currently active. - The following diagram describes the state machine of T-Rex: + The following diagram describes the state machine of TRex: ifdef::backend-docbook[] -image::images/trex_control_plane_state_machine.png[title="T-Rex running state machine",align="center",width=280, link="images/trex_control_plane_state_machine.png"] +image::images/trex_control_plane_state_machine.png[title="TRex running state machine",align="center",width=280, link="images/trex_control_plane_state_machine.png"] endif::backend-docbook[] ifdef::backend-xhtml11[] -image::images/trex_control_plane_state_machine.png[title="T-Rex running state machine",align="center",width=400, link="images/trex_control_plane_state_machine.png"] +image::images/trex_control_plane_state_machine.png[title="TRex running state machine",align="center",width=400, link="images/trex_control_plane_state_machine.png"] endif::backend-xhtml11[] - **`get_running_info()`** + - This method performs single poll of T-Rex running data and process it into the result object (named `result_obj`). + - The method returns the most updated data dump from T-Rex in the form of Python dictionary. + + This method performs single poll of TRex running data and process it into the result object (named `result_obj`). + + The method returns the most updated data dump from TRex in the form of Python dictionary. 
+ + Behind the scenes, running that method will trigger inner-client process over the saved window, and produce window-relevant information, as well as get the most important data more accessible. + Once the data has been fetched (at sample rate the satisfies the user), a custom data manipulation can be done in various forms and techniques footnote:[See `CTRexResult` module usage for more details]. + **Note: ** the sampling rate is bounded from buttom to 2 samples/sec. - **`sample_until_condition(condition_func, time_between_samples = 5)`** + - This method automatically sets ongoing sampling of T-Rex data, with sampling rate described by `time_between_samples`. On each fetched dump, the `condition_func` is applied on the result objects, and if returns `True`, the sampling will stop. + + This method automatically sets ongoing sampling of TRex data, with sampling rate described by `time_between_samples`. On each fetched dump, the `condition_func` is applied on the result objects, and if returns `True`, the sampling will stop. + On success (condition has been met), this method returns the latest result object that satisfied the given condition. + ON fail, this method will raise `UserWarning` exception. - **`sample_to_run_finish(time_between_samples = 5)`** + - This method automatically sets ongoing sampling of T-Rex data with sampling rate described by `time_between_samples` until T-Rex run finished. + This method automatically sets ongoing sampling of TRex data with sampling rate described by `time_between_samples` until TRex run finished. - **`get_result_obj()`** + Returns a pointer to the result object of the client instance. + @@ -249,16 +249,16 @@ TIP: The window stats (calculated when `get_running_info()` triggered) are very ==== `CTRexResult` module usage -This section covers how to use `CTRexResult` module to access into T-Rex data and post processing results, taking place at the client side whenever a data is polled from the server. 
+ +This section covers how to use `CTRexResult` module to access into TRex data and post processing results, taking place at the client side whenever a data is polled from the server. + The most important data structure in this module is the `history` object, which contains the sampled information (plus the post processing step) of each sample. -Most of the class methods are getters that enables an easy access to the most commonly used when working with T-Rex. These getters are called with self-explained names, such as `get_max_latency`. + +Most of the class methods are getters that enables an easy access to the most commonly used when working with TRex. These getters are called with self-explained names, such as `get_max_latency`. + However, on top to these methods, the class offers data accessibility using the rest of the class methods. + These methods are: - **`is_done_warmup()`** + - This will return `True` only if T-Rex has reached its expected transmission bandwidth footnote:[A 3% deviation is allowed.]. + - This parameter is important since in most cases, the most relevent test cases are interesting when T-Rex produces its expected TX, based on which the platform is tested and benchmerked. + This will return `True` only if TRex has reached its expected transmission bandwidth footnote:[A 3% deviation is allowed.]. + + This parameter is important since in most cases, the most relevent test cases are interesting when TRex produces its expected TX, based on which the platform is tested and benchmerked. - **`get_latest_dump()`** + Fetches the latest polled dump saved in history. @@ -291,17 +291,17 @@ These methods are: == Usage Examples -=== Example #1: Checking T-Rex status and Launching T-Rex +=== Example #1: Checking TRex status and Launching TRex -The following program checks T-Rex status, and later on launches it, querying its status along different time slots. 
+The following program checks TRex status, and later on launches it, querying its status along different time slots. [source, python] ---- import time trex = CTRexClient('trex-name') -print "Before Running, T-Rex status is: ", trex.is_running() # <1> -print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> +print "Before Running, TRex status is: ", trex.is_running() # <1> +print "Before Running, TRex status is: ", trex.get_running_status() # <2> ret = trex.start_trex( c = 2, # <3> m = 0.1, @@ -311,51 +311,51 @@ ret = trex.start_trex( c = 2, # <3> p = True, l = 1000) -print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() +print "After Starting, TRex status is: ", trex.is_running(), trex.get_running_status() time.sleep(10) # <4> -print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <5> +print "Is TRex running? ", trex.is_running(), trex.get_running_status() # <5> ---- -<1> `is_running()` returns a boolean and checks if T-Rex is running or not. +<1> `is_running()` returns a boolean and checks if TRex is running or not. -<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. +<2> `get_running_status()` returns a Python dictionary with TRex state, along with a verbose field containing extra info, if available. -<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'd' are mandatory). +<3> TRex lanching. All types of inputs are supported. Some fields (such as 'f' and 'd' are mandatory). -<4> Going to sleep for few seconds, allowing T-Rex to start. +<4> Going to sleep for few seconds, allowing TRex to start. -<5> Checking out with T-Rex status again, printing both a boolean return value and a full status. +<5> Checking out with TRex status again, printing both a boolean return value and a full status. 
-This code will prompt the following output, assuming a server was launched on the T-Rex machine. +This code will prompt the following output, assuming a server was launched on the TRex machine. ---- -Connecting to T-Rex @ http://trex-dan:8090/ ... -Before Running, T-Rex status is: False -Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} +Connecting to TRex @ http://trex-dan:8090/ ... +Before Running, TRex status is: False +Before Running, TRex status is: {u'state': , u'verbose': u'TRex is Idle'} <1> <1> -After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} +After Starting, TRex status is: False {u'state': , u'verbose': u'TRex is starting'} <1> <1> -Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} +Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} <1> <1> ---- -<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. +<1> When looking at TRex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. -=== Example #2: Checking T-Rex status and Launching T-Rex with 'BAD PARAMETERS' +=== Example #2: Checking TRex status and Launching TRex with 'BAD PARAMETERS' -The following program checks T-Rex status, and later on launches it with wrong input ('mdf' is not legal option), hence T-Rex run will not start and a message will be available. +The following program checks TRex status, and later on launches it with wrong input ('mdf' is not legal option), hence TRex run will not start and a message will be available. 
[source, python] ---- import time trex = CTRexClient('trex-name') -print "Before Running, T-Rex status is: ", trex.is_running() # <1> -print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> +print "Before Running, TRex status is: ", trex.is_running() # <1> +print "Before Running, TRex status is: ", trex.get_running_status() # <2> ret = trex.start_trex( c = 2, # <3> #<4> mdf = 0.1, @@ -365,53 +365,53 @@ ret = trex.start_trex( c = 2, # <3> p = True, l = 1000) -print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() +print "After Starting, TRex status is: ", trex.is_running(), trex.get_running_status() time.sleep(10) # <5> -print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <6> +print "Is TRex running? ", trex.is_running(), trex.get_running_status() # <6> ---- -<1> `is_running()` returns a boolean and checks if T-Rex is running or not. +<1> `is_running()` returns a boolean and checks if TRex is running or not. -<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. +<2> `get_running_status()` returns a Python dictionary with TRex state, along with a verbose field containing extra info, if available. -<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). +<3> TRex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). <4> Wrong parameter ('mdf') injected. -<5> Going to sleep for few seconds, allowing T-Rex to start. +<5> Going to sleep for few seconds, allowing TRex to start. -<6> Checking out with T-Rex status again, printing both a boolean return value and a full status. +<6> Checking out with TRex status again, printing both a boolean return value and a full status. -This code will prompt the following output, assuming a server was launched on the T-Rex machine. 
+This code will prompt the following output, assuming a server was launched on the TRex machine. ---- -Connecting to T-Rex @ http://trex-dan:8090/ ... -Before Running, T-Rex status is: False -Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} +Connecting to TRex @ http://trex-dan:8090/ ... +Before Running, TRex status is: False +Before Running, TRex status is: {u'state': , u'verbose': u'TRex is Idle'} <1> <1> -After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} +After Starting, TRex status is: False {u'state': , u'verbose': u'TRex is starting'} <1> <1> -Is T-Rex running? False {u'state': , u'verbose': u'T-Rex run failed due to wrong input parameters, or due to reachability issues.'} +Is TRex running? False {u'state': , u'verbose': u'TRex run failed due to wrong input parameters, or due to reachability issues.'} <2> <2> ---- -<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. +<1> When looking at TRex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. -<2> After T-Rex lanuching failed, a message indicating the failure reason. However, T-Rex is back Idle, ready to handle another launching request. +<2> After TRex lanuching failed, a message indicating the failure reason. However, TRex is back Idle, ready to handle another launching request. -=== Example #3: Launching T-Rex, let it run until custom condition is satisfied +=== Example #3: Launching TRex, let it run until custom condition is satisfied -The following program will launch T-Rex, and poll its result data until custom condition function returns `True`. + In this case, the condition function is simply named `condition`. + -Once the condition is met, T-Rex run will be terminated. +The following program will launch TRex, and poll its result data until custom condition function returns `True`. 
+ In this case, the condition function is simply named `condition`. + +Once the condition is met, TRex run will be terminated. [source, python] ---- -print "Before Running, T-Rex status is: ", trex.get_running_status() +print "Before Running, TRex status is: ", trex.get_running_status() - print "Starting T-Rex..." + print "Starting TRex..." ret = trex.start_trex( c = 2, mdf = 0.1, d = 1000, @@ -429,26 +429,26 @@ print "Before Running, T-Rex status is: ", trex.get_running_status() val_list = res.get_value_list("trex-global.data", "m_tx_expected_\w+") #<4> ---- -<1> The `condition` function defines when to stop T-Rex. In this case, when T-Rex's current tx (in pps) exceeds 200000. +<1> The `condition` function defines when to stop TRex. In this case, when TRex's current tx (in pps) exceeds 200000. <2> The condition is passed to `sample_until_condition` method, which will block until either the condition is met or an 'Exception' is raised. -<3> Once satisfied, `res` variable holds the first result object on which the condition satisfied. At this point, T-Rex status is 'Idle' and another run can be requested from the server. +<3> Once satisfied, `res` variable holds the first result object on which the condition satisfied. At this point, TRex status is 'Idle' and another run can be requested from the server. -<4> Further custom processing can be made on the result object, regardless of other T-Rex runs. +<4> Further custom processing can be made on the result object, regardless of other TRex runs. <<< -=== Example #4: Launching T-Rex, monitor live data and stopping on demand +=== Example #4: Launching TRex, monitor live data and stopping on demand -The following program will launch T-Rex, and while it runs poll the server (every 5 seconds) for running inforamtion, such as latency, drops, and other extractable parameters. 
+ -Then, after some criteria was met, T-Rex execution is terminated, enabeling others to use the resource instead of waiting for the entire execution to finish. +The following program will launch TRex, and while it runs poll the server (every 5 seconds) for running inforamtion, such as latency, drops, and other extractable parameters. + +Then, after some criteria was met, TRex execution is terminated, enabeling others to use the resource instead of waiting for the entire execution to finish. [source, python] ---- -print "Before Running, T-Rex status is: ", trex.get_running_status() +print "Before Running, TRex status is: ", trex.get_running_status() - print "Starting T-Rex..." + print "Starting TRex..." ret = trex.start_trex( c = 2, mdf = 0.1, d = 100, @@ -469,30 +469,30 @@ print "Before Running, T-Rex status is: ", trex.get_running_status() print obj.get_value_list("trex-global.data.m_tx_bps") time.sleep(5) #<3> - print "Terminating T-Rex..." + print "Terminating TRex..." ret = trex.stop_trex() #<4> ---- -<1> Iterate as long as T-Rex is running. + +<1> Iterate as long as TRex is running. + In this case the latest dump is also saved into `last_res` variable, so easier access for that data is available, although not needed most of the time. <2> Data processing. This is fully customizable for the relevant test initiated. <3> The sampling rate is flexibale and can be configured depending on the desired output. -<4> T-Rex termination. +<4> TRex termination. <<< -=== Example #5: Launching T-Rex, let it run until finished +=== Example #5: Launching TRex, let it run until finished -The following program will launch T-Rex, and poll it automatically until run finishes. The polling rate is customisable (in this case, every 10 seconds) using `time_between_samples` argument. +The following program will launch TRex, and poll it automatically until run finishes. The polling rate is customisable (in this case, every 10 seconds) using `time_between_samples` argument. 
[source, python] ---- -print "Before Running, T-Rex status is: ", trex.get_running_status() +print "Before Running, TRex status is: ", trex.get_running_status() - print "Starting T-Rex..." + print "Starting TRex..." ret = trex.start_trex( c = 2, #<1> mdf = 0.1, d = 1000, @@ -507,10 +507,10 @@ print "Before Running, T-Rex status is: ", trex.get_running_status() val_list = res.get_value_list("trex-global.data", "m_tx_expected_\w+") #<4> ---- -<1> T-Rex run initialization. +<1> TRex run initialization. -<2> Define the sample rate and block until T-Rex run ends. Once this method returns (assuming no error), T-Rex result object will contain the samples collected allong T-Rex run, limited to the history size footnoteref:[For example, For example for history sized 100 only the latest 100 samples will be available despite sampling more than that during T-Rex run.]. +<2> Define the sample rate and block until TRex run ends. Once this method returns (assuming no error), TRex result object will contain the samples collected allong TRex run, limited to the history size footnoteref:[For example, For example for history sized 100 only the latest 100 samples will be available despite sampling more than that during TRex run.]. <3> Once finished, `res` variable holds the latest result object. -<4> Further custom processing can be made on the result object, regardless of other T-Rex runs. \ No newline at end of file +<4> Further custom processing can be made on the result object, regardless of other TRex runs. 
\ No newline at end of file diff --git a/trex_control_plane_peek.asciidoc b/trex_control_plane_peek.asciidoc index 530da965..6d3aa134 100755 --- a/trex_control_plane_peek.asciidoc +++ b/trex_control_plane_peek.asciidoc @@ -1,225 +1,225 @@ -T-Rex Control Plane Design - Phase 1 peek -========================================= -:author: Dan Klein -:email: -:revnumber: 1.0 -:quotes.++: -:numbered: - - - -=== T-Rex traffic generator - -T-Rex traffic generator is a tool design the benchmark platforms with realistic traffic. -This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. - -=== T-Rex Control - -T-Rex control plane is under developement, and a phase 1 is planned to be published soon (Apr 2015). + -This document will shortly describe the planned control plane for T-Rex, which is planned to be more scalable and support automation more intuitively. - -==== T-Rex Control Plane - High Level - -T-Rex control plane is based on a JSON RPC transactions between clients and server. + -Each T-Rex machine will have a server running on it, closely interacting with T-Rex (clients do not approach T-Rex directly). + -As future feature, and as multiple T-Rexes might run on the same machine, single server shall serve all T-Rexes running a machine. - -The client is a Python based application that created `TRexClient` instances. + -Using class methods, the client interacts with T-Rex server, and enable it to perform the following commands: - - 1. Start T-Rex run (custom parameters supported). - - 2. Stop T-Rex run. - - 3. Check what is the T-Rex status (possible states: `Idle, Starting, Running`). - - 4. Poll (by customize sampling) the server and get live results from T-Rex **while still running**. - - 5. Get custom T-Rex stats based on a window of saved history of latest 'N' polling results. 
- - -==== T-Rex Control Plane - Example crumbs - - - - - **Exmaple #1: Checking T-Rex status and Launching T-Rex** - The following program checks T-Rex status, and later on launches it, querying its status along different time slots. - -[source, python] ----- -import time - -trex = CTRexClient('trex-name') -print "Before Running, T-Rex status is: ", trex.is_running() # <1> -print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> - -ret = trex.start_trex( c = 2, # <3> - m = 0.1, - d = 20, - f = 'avl/sfr_delay_10_1g.yaml', - nc = True, - p = True, - l = 1000) - -print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() - -time.sleep(10) # <4> - -print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <5> ----- - -<1> `is_running()` returns a boolean and checks if T-Rex is running or not. - -<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. - -<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). - -<4> Going to sleep for few seconds, allowing T-Rex to start. - -<5> Checking out with T-Rex status again, printing both a boolean return value and a full status. - -This code will prompt the following output, assuming a server was launched on the T-Rex machine. - ----- -Connecting to T-Rex @ http://trex-dan:8090/ ... -Before Running, T-Rex status is: False -Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} - <1> <1> - -After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} - <1> <1> -Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - <1> <1> ----- - -<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. 
- - - * **Exmaple #2: Checking T-Rex status and Launching T-Rex with 'BAD PARAMETERS'** - The following program checks T-Rex status, and later on launches it with wrong input ('mdf' is not legal option), hence T-Rex run will not start and a message will be available. - -[source, python] ----- -import time - -trex = CTRexClient('trex-name') -print "Before Running, T-Rex status is: ", trex.is_running() # <1> -print "Before Running, T-Rex status is: ", trex.get_running_status() # <2> - -ret = trex.start_trex( c = 2, # <3> -#<4> mdf = 0.1, - d = 20, - f = 'avl/sfr_delay_10_1g.yaml', - nc = True, - p = True, - l = 1000) - -print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() - -time.sleep(10) # <5> - -print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() # <6> ----- - -<1> `is_running()` returns a boolean and checks if T-Rex is running or not. - -<2> `get_running_status()` returns a Python dictionary with T-Rex state, along with a verbose field containing extra info, if available. - -<3> T-Rex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). - -<4> Wrong parameter ('mdf') injected. - -<5> Going to sleep for few seconds, allowing T-Rex to start. - -<6> Checking out with T-Rex status again, printing both a boolean return value and a full status. - -This code will prompt the following output, assuming a server was launched on the T-Rex machine. ----- -Connecting to T-Rex @ http://trex-dan:8090/ ... -Before Running, T-Rex status is: False -Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} - <1> <1> - -After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} - <1> <1> -Is T-Rex running? 
False {u'state': , u'verbose': u'T-Rex run failed due to wrong input parameters, or due to reachability issues.'} - <2> <2> ----- - -<1> When looking at T-Rex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. - -<2> After T-Rex lanuching failed, a message indicating the failure reason. However, T-Rex is back Idle, ready to handle another launching request. - - - * **Exmaple #3: Launching T-Rex, monitor live data and stopping on demand** - The following program will launch T-Rex, and while it runs poll the server (every 5 seconds) for running inforamtion, such as latency, drops, and other extractable parameters. + - Then, after some criteria was met, T-Rex execution is terminated, enabeling others to use the resource instead of waiting for the entire execution to finish. - -[source, python] ----- -print "Before Running, T-Rex status is: ", trex.get_running_status() - - print "Starting T-Rex..." - ret = trex.start_trex( c = 2, - mdf = 0.1, - d = 100, - f = 'avl/sfr_delay_10_1g.yaml', - nc = True, - p = True, - l = 1000) - - print "After Starting, T-Rex status is: ", trex.is_running(), trex.get_running_status() - print "sleeping 20 secs.." - time.sleep(20) - for i in range(5): - print "Is T-Rex running? ", trex.is_running(), trex.get_running_status() #<1> - #<2> received_info = trex.get_running_info() - #<3> # Custom data processing is done here - #<4> time.sleep(5) - - print "Terminating T-Rex..." - #<5> ret = trex.stop_trex() - print "After stopping, T-Rex status is: ", trex.is_running(), trex.get_running_status() #<6> ----- - -<1> Running queries is still optional, although not mandatory in order to get stats. - -<2> `get_running_info()` will return the latest data dump available from T-Rex. + - Some aditional data manipulation and queries are under developement, including manipulation over number of dumps, which is useful for avoiding "spikes" of singular behavior. - -<3> Data processing. 
This is fully customizable for the relevant test initiated. - -<4> The sampling rate is flexibale and can be configured depending on the desired output. - -<5> T-Rex termination. - -<6> Post-termination check for status. - - -This code will prompt the following output, assuming a server was launched on the T-Rex machine. ----- -Connecting to T-Rex @ http://trex-dan:8090/ ... -Before Running, T-Rex status is: False -Before Running, T-Rex status is: {u'state': , u'verbose': u'T-Rex is Idle'} -Starting T-Rex... -After Starting, T-Rex status is: False {u'state': , u'verbose': u'T-Rex is starting'} - -<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - -<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - -<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - -<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - -<1> Is T-Rex running? True {u'state': , u'verbose': u'T-Rex is Running'} - -Before terminating, T-Rex status is: True {u'state': , u'verbose': u'T-Rex is Running'} -Terminating T-Rex... -#<2> After stopping, T-Rex status is: False {u'state': , u'verbose': u'T-Rex finished (terminated).'} - ----- - -<1> Polling T-Rex status while in a data polling loop. - -<2> After termination, we can see that T-Rex is back idle, also the `verbose` field shows the stop reason \ No newline at end of file +TRex Control Plane Design - Phase 1 peek +========================================= +:author: Dan Klein +:email: +:revnumber: 1.0 +:quotes.++: +:numbered: + + + +=== TRex traffic generator + +TRex traffic generator is a tool design the benchmark platforms with realistic traffic. +This is a work-in-progress product, which is under constant developement, new features are added and support for more router's fuctionality is achieved. + +=== TRex Control + +TRex control plane is under developement, and a phase 1 is planned to be published soon (Apr 2015). 
+ +This document will shortly describe the planned control plane for TRex, which is planned to be more scalable and support automation more intuitively. + +==== TRex Control Plane - High Level + +TRex control plane is based on a JSON RPC transactions between clients and server. + +Each TRex machine will have a server running on it, closely interacting with TRex (clients do not approach TRex directly). + +As future feature, and as multiple T-Rexes might run on the same machine, single server shall serve all T-Rexes running a machine. + +The client is a Python based application that created `TRexClient` instances. + +Using class methods, the client interacts with TRex server, and enable it to perform the following commands: + + 1. Start TRex run (custom parameters supported). + + 2. Stop TRex run. + + 3. Check what is the TRex status (possible states: `Idle, Starting, Running`). + + 4. Poll (by customize sampling) the server and get live results from TRex **while still running**. + + 5. Get custom TRex stats based on a window of saved history of latest 'N' polling results. + + +==== TRex Control Plane - Example crumbs + + + + - **Exmaple #1: Checking TRex status and Launching TRex** + The following program checks TRex status, and later on launches it, querying its status along different time slots. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, TRex status is: ", trex.is_running() # <1> +print "Before Running, TRex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> + m = 0.1, + d = 20, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, TRex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <4> + +print "Is TRex running? ", trex.is_running(), trex.get_running_status() # <5> +---- + +<1> `is_running()` returns a boolean and checks if TRex is running or not. 
+ +<2> `get_running_status()` returns a Python dictionary with TRex state, along with a verbose field containing extra info, if available. + +<3> TRex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). + +<4> Going to sleep for few seconds, allowing TRex to start. + +<5> Checking out with TRex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the TRex machine. + +---- +Connecting to TRex @ http://trex-dan:8090/ ... +Before Running, TRex status is: False +Before Running, TRex status is: {u'state': , u'verbose': u'TRex is Idle'} + <1> <1> + +After Starting, TRex status is: False {u'state': , u'verbose': u'TRex is starting'} + <1> <1> +Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} + <1> <1> +---- + +<1> When looking at TRex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. + + + * **Exmaple #2: Checking TRex status and Launching TRex with 'BAD PARAMETERS'** + The following program checks TRex status, and later on launches it with wrong input ('mdf' is not legal option), hence TRex run will not start and a message will be available. + +[source, python] +---- +import time + +trex = CTRexClient('trex-name') +print "Before Running, TRex status is: ", trex.is_running() # <1> +print "Before Running, TRex status is: ", trex.get_running_status() # <2> + +ret = trex.start_trex( c = 2, # <3> +#<4> mdf = 0.1, + d = 20, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + +print "After Starting, TRex status is: ", trex.is_running(), trex.get_running_status() + +time.sleep(10) # <5> + +print "Is TRex running? ", trex.is_running(), trex.get_running_status() # <6> +---- + +<1> `is_running()` returns a boolean and checks if TRex is running or not. 
+ +<2> `get_running_status()` returns a Python dictionary with TRex state, along with a verbose field containing extra info, if available. + +<3> TRex lanching. All types of inputs are supported. Some fields (such as 'f' and 'c' are mandatory). + +<4> Wrong parameter ('mdf') injected. + +<5> Going to sleep for few seconds, allowing TRex to start. + +<6> Checking out with TRex status again, printing both a boolean return value and a full status. + +This code will prompt the following output, assuming a server was launched on the TRex machine. +---- +Connecting to TRex @ http://trex-dan:8090/ ... +Before Running, TRex status is: False +Before Running, TRex status is: {u'state': , u'verbose': u'TRex is Idle'} + <1> <1> + +After Starting, TRex status is: False {u'state': , u'verbose': u'TRex is starting'} + <1> <1> +Is TRex running? False {u'state': , u'verbose': u'TRex run failed due to wrong input parameters, or due to reachability issues.'} + <2> <2> +---- + +<1> When looking at TRex status, both an enum status (`Idle, Starting, Running`) and verbose output are available. + +<2> After TRex lanuching failed, a message indicating the failure reason. However, TRex is back Idle, ready to handle another launching request. + + + * **Exmaple #3: Launching TRex, monitor live data and stopping on demand** + The following program will launch TRex, and while it runs poll the server (every 5 seconds) for running inforamtion, such as latency, drops, and other extractable parameters. + + Then, after some criteria was met, TRex execution is terminated, enabeling others to use the resource instead of waiting for the entire execution to finish. + +[source, python] +---- +print "Before Running, TRex status is: ", trex.get_running_status() + + print "Starting TRex..." 
+ ret = trex.start_trex( c = 2, + mdf = 0.1, + d = 100, + f = 'avl/sfr_delay_10_1g.yaml', + nc = True, + p = True, + l = 1000) + + print "After Starting, TRex status is: ", trex.is_running(), trex.get_running_status() + print "sleeping 20 secs.." + time.sleep(20) + for i in range(5): + print "Is TRex running? ", trex.is_running(), trex.get_running_status() #<1> + #<2> received_info = trex.get_running_info() + #<3> # Custom data processing is done here + #<4> time.sleep(5) + + print "Terminating TRex..." + #<5> ret = trex.stop_trex() + print "After stopping, TRex status is: ", trex.is_running(), trex.get_running_status() #<6> +---- + +<1> Running queries is still optional, although not mandatory in order to get stats. + +<2> `get_running_info()` will return the latest data dump available from TRex. + + Some aditional data manipulation and queries are under developement, including manipulation over number of dumps, which is useful for avoiding "spikes" of singular behavior. + +<3> Data processing. This is fully customizable for the relevant test initiated. + +<4> The sampling rate is flexibale and can be configured depending on the desired output. + +<5> TRex termination. + +<6> Post-termination check for status. + + +This code will prompt the following output, assuming a server was launched on the TRex machine. +---- +Connecting to TRex @ http://trex-dan:8090/ ... +Before Running, TRex status is: False +Before Running, TRex status is: {u'state': , u'verbose': u'TRex is Idle'} +Starting TRex... +After Starting, TRex status is: False {u'state': , u'verbose': u'TRex is starting'} + +<1> Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} + +<1> Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} + +<1> Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} + +<1> Is TRex running? True {u'state': , u'verbose': u'TRex is Running'} + +<1> Is TRex running? 
True {u'state': , u'verbose': u'TRex is Running'} + +Before terminating, TRex status is: True {u'state': , u'verbose': u'TRex is Running'} +Terminating TRex... +#<2> After stopping, TRex status is: False {u'state': , u'verbose': u'TRex finished (terminated).'} + +---- + +<1> Polling TRex status while in a data polling loop. + +<2> After termination, we can see that TRex is back idle, also the `verbose` field shows the stop reason \ No newline at end of file diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index e01d770e..1ae13e81 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -1,4 +1,4 @@ -T-Rex Virtual Machine setup and basic usage +TRex Virtual Machine setup and basic usage =========================================== :author: Dan Klein :email: @@ -260,7 +260,7 @@ listening on enp0s8, link-type EN10MB (Ethernet), capture size 262144 bytes ---- [NOTE] -See http://trex-tgn.cisco.com/trex/doc/trex_book.pdf[T-Rex full manual] for a complete understading of the tool features and options. +See http://trex-tgn.cisco.com/trex/doc/trex_book.pdf[TRex full manual] for a complete understading of the tool features and options. 
=== Updating TRex -- cgit 1.2.3-korg From 671cf52ebacece71822bc8b72e415ca7284efa20 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 2 Mar 2016 05:23:19 +0200 Subject: draft stateless support --- draft_trex_stateless-docinfo.html | 22 + draft_trex_stateless.asciidoc | 1046 +++++++++++++++++++++++++++++++++ images/Thumbs.db | Bin 441856 -> 518144 bytes images/stateless_objects.png | Bin 0 -> 20514 bytes images/stl_streams_example.png | Bin 0 -> 30788 bytes images/stl_tut_1.png | Bin 0 -> 22476 bytes images/stl_tut_12.png | Bin 0 -> 6751 bytes images/stl_tut_4.png | Bin 0 -> 17218 bytes images/trex_2.0_stateless.png | Bin 0 -> 1081192 bytes visio_drawings/trex_2.0_stateless.vsd | Bin 935424 -> 966656 bytes wscript | 3 + 11 files changed, 1071 insertions(+) create mode 100644 draft_trex_stateless-docinfo.html create mode 100644 draft_trex_stateless.asciidoc create mode 100644 images/stateless_objects.png create mode 100644 images/stl_streams_example.png create mode 100644 images/stl_tut_1.png create mode 100644 images/stl_tut_12.png create mode 100644 images/stl_tut_4.png create mode 100644 images/trex_2.0_stateless.png diff --git a/draft_trex_stateless-docinfo.html b/draft_trex_stateless-docinfo.html new file mode 100644 index 00000000..a444f506 --- /dev/null +++ b/draft_trex_stateless-docinfo.html @@ -0,0 +1,22 @@ + + + + + + + + + + + diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc new file mode 100644 index 00000000..bc5882cd --- /dev/null +++ b/draft_trex_stateless.asciidoc @@ -0,0 +1,1046 @@ +TRex +==== +:author: hhaim +:email: +:revnumber: 2.0 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex +:toclevels: 4 + + +== Stateless support + +=== High level functionality + +* High scale - line rate 14MPPS per core, linear scale with number of cores +* Support 1/10/25/40/100 Gb/sec interfaces +* Interface can configured with multi traffic profiles +* Profile can support 
multi streams. Scale to 10K streams in parallel +* Each Stream +** Packet template - ability to build any packet using Scapy (e.g. MPLS/Ipv4/Ipv6/GRE/VXLAN/NSH) +** Field engine program +*** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 +*** Ability to change the packet size (e.g. Random packet size 64-9K) + +** Mode -Continues/Burst/Multi burst support +** Rate can be specified in: +*** Packet per second -(e.g. 14MPPS) +*** L1 bandwidth (e.g. 500Mb/sec) +*** L2 bandwidth (e.g. 500Mb/sec) +*** Interface link percentage,( e.g. 10%) +** Support HLTAPI like profile definition +** Action- stream can trigger a stream +* Interactive support- Fast Console, GUI +* Statistic per interface +* Statistic per stream done in hardware +* Latency and Jitter per stream +* Blazing fast Automation support +** Python 2.7/3.0 Client API +** Python HLTAPI Client API +* Multi user support - multiple users can interact with the same TRex simultaneously + +==== Traffic profile example + +image::images/stl_streams_example.png[title="Streams example",align="left",width=600, link="images/stl_streams_example.png"] + +==== High level functionality - near future + +* ARP emulation - learn server MAC. Support unlimited of MAC addresses per port + +==== High level functionality - roadmap + +* Add emulation support +** RIP/BGP/ISIS/SPF + + +=== RPC Architecture + +To support interactive mode, JSON-RPC2 server added to the Control Plane + +The following diagram illustrates the RPC server component's + +image::images/trex_2.0_stateless.png[title="RPC Server Position",align="left",width=800, link="images/trex_2.0_stateless.png"] + +* The Control transport protocol is ZMQ working in REQ/RES mode +* JSON-RPC2 is the RPC protocol on top of the ZMQ REQ/RES +* Async transport is ZMQ working SUB/PUB mode. It is for async event such as interface change mode, counters etc. 
+* Python is the first Client to implement the Python automation API +* Console utilizes the Python API to implement a user interface to TRex + +For more detailed see RPC specification link:trex_rpc_server_spec.html[here] + +This Architecture provides the following advantages: + +* Fast interaction with TRex server. very fast load/start/stop profiles to an interface. +* Leveraging Python/Scapy for building a packet/Field engine +* HLTAPI compiler is done in Python. + + +=== Objects + + +image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600, link="images/stateless_objects.png"] + +* *TRex*: Each TRex instance, includes a number of interfaces +* *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) +* *Traffic profile*: Each traffic profile includes a number of streams +* *Stream*: Each stream includes +** *Packet*: Packet template up to 9K bytes +** *Field Engine*: which field to change, do we want to change packet size +** *Mode*: how to send the packet. Continues/Burst/Multi Burst +** *Rx Stats* Which Statstistic to collect for each stream +** *Rate*: in Packet per second or bandwidth +** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continues mode + +=== Tutorials + +This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. + +==== Tutorial 1: Simple Ipv4/UDP packet - Simulator + +The following example demonstrates the most basic use case using our simulator. 
+ +file: `stl/udp_1pkt_simple.py` + +[source,python] +---- +from trex_stl_lib.api import * + +class STLS1(object): + + def create_stream (self): + + return STLStream( + packet = + STLPktBuilder( + pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025)/(10*'x') <1> + ), + mode = STLTXCont()) <2> + + + def get_streams (self, direction = 0): + # create 1 stream + return [ self.create_stream() ] + + +# dynamic load - used for trex console or simulator +def register(): <3> + return STLS1() +---- +<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x' +<2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) +<3> Each Traffic profile module should have a `register` function + + +Now let try to run it throw TRex simulator limiting the number of packet to 10 + +[source,bash] +---- +$ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 + executing command: 'bp-sim-64-debug --pcap --sl --cores 1 --limit 5000 -f /tmp/tmpq94Tfx -o b.pcap' + + General info: + ------------ + + image type: debug + I/O output: b.pcap + packet limit: 10 + core recording: merge all + + Configuration info: + ------------------- + + ports: 2 + cores: 1 + + Port Config: + ------------ + + stream count: 1 + max PPS : 1.00 pps + max BPS L1 : 672.00 bps + max BPS L2 : 512.00 bps + line util. : 0.00 % + + + Starting simulation... 
+ + + Simulation summary: + ------------------- + + simulated 10 packets + written 10 packets to 'b.pcap' +---- + + +image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width=800, link="images/stl_tut_1.png.png"] + + +.To look into the JSON command to the server +[source,bash] +---- +$./stl-sim -f stl/udp_1pkt_simple.py --json +[ + { + "id": 1, + "jsonrpc": "2.0", + "method": "add_stream", + "params": { + "handler": 0, + "port_id": 0, + "stream": { + "action_count": 0, + "enabled": true, + "flags": 0, + "isg": 0.0, + "mode": { + "rate": { + "type": "pps", + "value": 1.0 + }, + "type": "continuous" + }, + "next_stream_id": -1, + "packet": { + "binary": "AAAAAQAAAAAAAgAACABFAAAmAAEAAEAROsUQAAABMAAAAQQBAAwAEmFheHh4eHh4eHh4eA==", + "meta": "" + }, + "rx_stats": { + "enabled": false + }, + "self_start": true, + "vm": { + "instructions": [], + "split_by_var": "" + } + }, + "stream_id": 1 + } + }, + { + "id": 1, + "jsonrpc": "2.0", + "method": "start_traffic", + "params": { + "duration": -1, + "force": true, + "handler": 0, + "mul": { + "op": "abs", + "type": "raw", + "value": 1.0 + }, + "port_id": 0 + } + } +] + +---- + +For more detailed on Stream definition see RPC specification link:trex_rpc_server_spec.html#_add_stream[here] + + +.To look into the YAML profile +[source,bash] +---- +$./stl-sim -f stl/udp_1pkt_simple.py --yaml +- stream: + action_count: 0 + enabled: true + flags: 0 + isg: 0.0 + mode: + pps: 1.0 + type: continuous + packet: + binary: AAAAAQAAAAAAAgAACABFAAAmAAEAAEAROsUQAAABMAAAAQQBAAwAEmFheHh4eHh4eHh4eA== + meta: '' + rx_stats: + enabled: false + self_start: true + vm: + instructions: [] + split_by_var: '' +---- + + +.To look into the Packet detail try --pkt option +[source,bash] +---- +$./stl-sim -f stl/udp_1pkt_simple.py --pkt + ======================= + Stream 0 + ======================= +###[ Ethernet ]### + dst = 00:00:00:01:00:00 + src = 00:00:00:02:00:00 + type = IPv4 +###[ IP ]### + version = 4L + ihl = 5L + tos = 0x0 + 
len = 38 + id = 1 + flags = + frag = 0L + ttl = 64 + proto = udp + chksum = 0x3ac5 + src = 16.0.0.1 + dst = 48.0.0.1 + \options \ +###[ UDP ]### + sport = blackjack + dport = 12 + len = 18 + chksum = 0x6161 +###[ Raw ]### + load = 'xxxxxxxxxx' +0000 00 00 00 01 00 00 00 00 00 02 00 00 08 00 45 00 ..............E. +0010 00 26 00 01 00 00 40 11 3A C5 10 00 00 01 30 00 .&....@.:.....0. +0020 00 01 04 01 00 0C 00 12 61 61 78 78 78 78 78 78 ........aaxxxxxx +0030 78 78 78 78 xxxx +---- + +==== Tutorial 2: Simple Ipv4/UDP packet - TRex + +===== Run TRex as a server mode + +First run trex in interactive mode + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- + + +===== Connect with Console + +From the same machine in a different terminal connect to to trex (you can do it from remote machine with -s [ip] + +from console you can run this + +[source,bash] +---- +$trex-console + +Connecting to RPC server on localhost:4501 [SUCCESS] +connecting to publisher server on localhost:4500 [SUCCESS] +Acquiring ports [0, 1, 2, 3]: [SUCCESS] + +125.69 [ms] + +TRex > start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> + +Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] +Attaching 1 streams to port(s) [0, 1, 2, 3]: [SUCCESS] +Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] + +# pause the traffic on all port +>pause -a #<2> + +# resume the traffic on all port +>resume -a #<3> + +# stop traffic on all port +>stop -a #<4> + +# show dynamic statistic +>tui +---- +<1> Start the traffic on all the ports in 10mbps. 
you can try with 14MPPS +<2> Pause the traffic +<3> Resume +<4> Stop on all the ports + + +To look into the streams using `streams -a` + +.Streams +[source,bash] +---- + +TRex > streams -a +Port 0: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 1: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 2: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 3: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +TRex > +---- + + +to get help on a command run `command --help` + +to look into general statistics + +[source,bash] +---- +Global Statistics + +Connection : localhost, Port 4501 +Version : v1.93, UUID: N/A +Cpu Util : 0.2% + : +Total Tx L2 : 40.01 Mb/sec +Total Tx L1 : 52.51 Mb/sec +Total Rx : 40.01 Mb/sec +Total Pps : 78.14 Kpkt/sec + : +Drop Rate : 0.00 b/sec +Queue Full : 0 pkts + +Port Statistics + + port | 0 | 1 | + -------------------------------------------------------- + owner | hhaim | hhaim | + state | ACTIVE | ACTIVE | + -- | | | + Tx bps L2 | 10.00 Mbps | 10.00 Mbps | + Tx bps L1 | 13.13 Mbps | 13.13 Mbps | + Tx pps | 19.54 Kpps | 19.54 Kpps | + Line Util. 
| 0.13 % | 0.13 % | + --- | | | + Rx bps | 10.00 Mbps | 10.00 Mbps | + Rx pps | 19.54 Kpps | 19.54 Kpps | + ---- | | | + opackets | 1725794 | 1725794 | + ipackets | 1725794 | 1725794 | + obytes | 110450816 | 110450816 | + ibytes | 110450816 | 110450816 | + tx-bytes | 110.45 MB | 110.45 MB | + rx-bytes | 110.45 MB | 110.45 MB | + tx-pkts | 1.73 Mpkts | 1.73 Mpkts | + rx-pkts | 1.73 Mpkts | 1.73 Mpkts | + ----- | | | + oerrors | 0 | 0 | + ierrors | 0 | 0 | + + status: / + + browse: 'q' - quit, 'g' - dashboard, '0-3' - port display + dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, +---- + + +==== Tutorial 3: Simple Ipv4/UDP packet + +The following example demonstrates + +1. More than one stream +2. Burst of 10 packets +3. Stream activate a Stream (self_start=False) + + +file: `stl/burst_3pkt_60pkt.py` + + +[source,python] +---- + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + + return STLProfile( [ STLStream( isg = 10.0, # star in delay + name ='S0', + packet = STLPktBuilder(pkt = base_pkt/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 10), <1> + next = 'S1'), # point to next stream + + STLStream( self_start = False, # stream is disabled enable trow S0 <2> + name ='S1', + packet = STLPktBuilder(pkt = base_pkt1/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 20), + next = 'S2' ), + + STLStream( self_start = False, # stream is disabled enable trow S0 <3> + name ='S2', + packet = STLPktBuilder(pkt = base_pkt2/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 30 ) + ) + ]).get_streams() + +---- +<1> Stream S0 is with self_start=True start after 10 sec +<2> S1 with self_start=False. 
S0 activate it +<3> S2 is activate by S1 + +[source,bash] +---- +$ ./stl-sim -f stl/stl/burst_3pkt_600pkt.py -o b.pcap +---- + +The pcap file has 60 packet. The first 10 packets has src_ip=16.0.0.1. The next 10 packets has src_ip=16.0.0.2. The next 10 packets has src_ip=16.0.0.3 + +This profile can be run from Console using thed command + +[source,bash] +---- +TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 +---- + + +==== Tutorial 4: Multi Burst mode + +file: `stl/multi_burst_2st_1000pkt.py` + + +[source,python] +---- + + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + + return STLProfile( [ STLStream( isg = 10.0, # star in delay <1> + name ='S0', + packet = STLPktBuilder(pkt = base_pkt/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 10), + next = 'S1'), # point to next stream + + STLStream( self_start = False, # stream is disabled enable trow S0 <2> + name ='S1', + packet = STLPktBuilder(pkt = base_pkt1/pad), + mode = STLTXMultiBurst( pps = 1000, + pkts_per_burst = 4, + ibg = 1000000.0, + count = 5) + ) + + ]).get_streams() + +---- +<1> Stream S0 wait 10 usec(isg) and send burst of 10 packet in 10 PPS rate +<2> Multi burst of 5 Burst of 4 packet with inter burst gap of one second + + +image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link="images/stl_tut_4.png"] + + +==== Tutorial 5: Loops + +file: `stl/burst_3st_loop_x_times.py` + +[source,python] +---- + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt2 = 
Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + + return STLProfile( [ STLStream( isg = 10.0, # star in delay + name ='S0', + packet = STLPktBuilder(pkt = base_pkt/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 1), + next = 'S1'), # point to next stream + + STLStream( self_start = False, # stream is disabled enable trow S0 + name ='S1', + packet = STLPktBuilder(pkt = base_pkt1/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 2), + next = 'S2' ), + + STLStream( self_start = False, # stream is disabled enable trow S0 + name ='S2', + packet = STLPktBuilder(pkt = base_pkt2/pad), + mode = STLTXSingleBurst( pps = 10, total_pkts = 3 ), + action_count = 2, # loop 2 times <1> + next = 'S0' # back to S0 loop + ) + ]).get_streams() + +---- +<1> go back to S0 but limit it to 2 loops + + +==== Tutorial 6: IMIX with UDP packets directional + +file: `stl/imix.py` + +[source,python] +---- + def __init__ (self): + # default IP range + self.ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"}, + 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}} + + # default IMIX properties + self.imix_table = [ {'size': 60, 'pps': 28, 'isg':0 }, + {'size': 590, 'pps': 20, 'isg':0.1 }, + {'size': 1514, 'pps': 4, 'isg':0.2 } ] + + + def create_stream (self, size, pps, isg, vm ): + # create a base packet and pad it to size + base_pkt = Ether()/IP()/UDP() + pad = max(0, size - len(base_pkt)) * 'x' + + pkt = STLPktBuilder(pkt = base_pkt/pad, + vm = vm) + + return STLStream(isg = isg, + packet = pkt, + mode = STLTXCont(pps = pps)) + + + def get_streams (self, direction = 0): <1> + + if direction == 0: <2> + src = self.ip_range['src'] + dst = self.ip_range['dst'] + else: + src = self.ip_range['dst'] + dst = self.ip_range['src'] + + # construct the base packet for the profile + + vm =[ <3> + # src + STLVmFlowVar(name="src", + min_value=src['start'], + max_value=src['end'], + size=4,op="inc"), + 
STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"), + + # dst + STLVmFlowVar(name="dst", + min_value=dst['start'], + max_value=dst['end'], + size=4, + op="inc"), + STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"), + + # checksum + STLVmFixIpv4(offset = "IP") + + ] + + # create imix streams + return [self.create_stream(x['size'], x['pps'],x['isg'] , vm) for x in self.imix_table] +---- +<1> Base on the direction, we will construct a diffrent stream (replace src and dest) +<2> Even port id has direction==0 and odd has direction==1 +<3> We didn't explain this yet. but this is a Field Engine program to change fields inside the packets + + +==== Tutorial 7: Field Engine, Syn attack + +The following example demonstrates changing packet fields. +The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to get 100% flexiable in the cost of performance. +The FE can allocate variable in Stream context. Write a variable to a packet offset, change packet size etc. + +*Some examples for what can be done:* + +* Change ipv4.tos 1-10 +* Change packet size to be random in range 64-9K +* Create range of flows (change src_ip,dest_ip,src_port,dest_port) +* Update Ipv4 checksum + +for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] + +The following example demonstrates creating SYN attack from many src to one server. 
+ +file: `stl/syn_attack.py` + +[source,python] +---- + def create_stream (self): + + # TCP SYN + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") <1> + + + # vm + vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + min_value="16.0.0.0", + max_value="18.0.0.254", + size=4, op="random"), <2> + + STLVmFlowVar(name="src_port", + min_value=1025, + max_value=65000, + size=2, op="random"), <3> + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), <4> + + STLVmFixIpv4(offset = "IP"), # fix checksum <5> + + STLVmWrFlowVar(fv_name="src_port", <6> + pkt_offset= "TCP.sport") # fix udp len + + ] + ) + + pkt = STLPktBuilder(pkt = base_pkt, + vm = vm) + + return STLStream(packet = pkt, + random_seed = 0x1234,# can be remove. will give the same random value any run + mode = STLTXCont()) +---- +<1> Create SYN packet using Scapy +<2> Define variable name=ip_src, 4 bytes size for IPv4. +<3> Define variable name=src_port, 2 bytes size for port. +<4> Write ip_src var into `IP.src` packet offset. Scapy calculate the offset. We could gave `IP:1.src" for second IP header in the packet +<5> Fix Ipv4 checksum. here we provide the header name `IP` we could gave `IP:1` for second IP +<6> Update TCP src port- TCP checksum is not updated here + +WARNING: Original Scapy does not have the capability to calculate offset for a header/field by name. This offset capability won't work for all the cases because there could be complex cases that Scapy rebuild the header. In such cases put offset as a number + +The output pcap file field can be seen here + +.Pcap file output +[format="csv",cols="1^,2^,1^", options="header"] +|================= +pkt,Client IPv4,Client Port + 1 , 17.152.71.218 , 5814 + 2 , 17.7.6.30 , 26810 + 3 , 17.3.32.200 , 1810 + 4 , 17.135.236.168 , 55810 + 5 , 17.46.240.12 , 1078 + 6 , 16.133.91.247, 2323 +|================= + + +==== Tutorial 8: Field Engine, Tuple Generator + +The following example demonstrates creating multiply flow from the same packet template. 
+The TupleGenerator instructions are used to create two variables with IP, port + +file: `stl/udp_1pkt_tuple_gen.py` + +[source,python] +---- + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + + pad = max(0, size - len(base_pkt)) * 'x' + + vm = CTRexScRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", <1> + ip_max="16.0.0.2", + port_min=1025, + port_max=65535, + name="tuple"), # define tuple gen + + STLVmWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ), <2> + STLVmFixIpv4(offset = "IP"), + STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) <3> + ] + ); + + pkt = STLPktBuilder(pkt = base_pkt/pad, + vm = vm) +---- +<1> Define struct with two dependent varibles tuple.ip tuple.port +<2> Write tuple.ip to Ipv4 src field offset +<3> Write tuple.port to UDP header. You should set UDP.checksum to zero + + +.Pcap file output +[format="csv",cols="1^,2^,1^", options="header"] +|================= +pkt,Client IPv4,Client Port + 1 , 16.0.0.1 , 1025 + 2 , 16.0.0.2 , 1025 + 3 , 16.0.0.1 , 1026 + 4 , 16.0.0.2 , 1026 + 5 , 16.0.0.1 , 1027 + 6 , 16.0.0.2, 1027 +|================= + +* Number of clients are two 16.0.0.1 and 16.0.0.2 +* Number of flows is limited to 129020 (2*65535-1025) +* The variable size should match the size of the FlowVarWr instruction + +==== Tutorial 9: Field Engine, write to a bit-field packet + +The following example demonstrates a way to write a variable to a bit field packet variables. +In this example MPLS label field will be changed. 
+ +.MPLS header +[cols="32", halign="center"] +|==== +20+<|Label 3+<|TC 1+<|S 8+<|TTL| +0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1| +|==== + +file: `stl/udp_1pkt_mpls_vm.py` + +[source,python] +---- + + def create_stream (self): + # 2 MPLS label the internal with s=1 (last one) + pkt = Ether()/ + MPLS(label=17,cos=1,s=0,ttl=255)/ + MPLS(label=0,cos=1,s=1,ttl=12)/ + IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025)/('x'*20) + + vm = CTRexScRaw( [ STLVmFlowVar(name="mlabel", <1> + min_value=1, + max_value=2000, + size=2, op="inc"), # 2 bytes var <2> + STLVmWrMaskFlowVar(fv_name="mlabel", + pkt_offset= "MPLS:1.label", <3> + pkt_cast_size=4, + mask=0xFFFFF000,shift=12) # write to 20bit MSB + ] + ) + + # burst of 100 packets + return STLStream(packet = STLPktBuilder(pkt = pkt ,vm = vm), + mode = STLTXSingleBurst( pps = 1, total_pkts = 100) ) + +---- +<1> Define varible size of 2 bytes +<2> Write the variable label with a shift of 12 bits and with 20bit MSB mask. Cast the variables of 2 bytes to 4 bytes +<3> Second MPLS header should be changed + + +==== Tutorial 10: Field Engine, Random packet size + +The following example demonstrates a way to to change packet size to be a random size. +The way to do it is: +1. Define template packet with maximum size +2. Trim the packet to the size you want +3. 
Update the packet fields to the new size + +file: `stl/udp_rand_len_9k.py` + +[source,python] +---- + + def create_stream (self): + # pkt + p_l2 = Ether(); + p_l3 = IP(src="16.0.0.1",dst="48.0.0.1") + p_l4 = UDP(dport=12,sport=1025) + pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4)); + base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size)) + + l3_len_fix =-(len(p_l2)); + l4_len_fix =-(len(p_l2/p_l3)); + + + # vm + vm = CTRexScRaw( [ STLVmFlowVar(name="fv_rand", <1> + min_value=64, + max_value=len(base_pkt), + size=2, + op="random"), + + STLVmTrimPktSize("fv_rand"), # total packet size <2> + + STLVmWrFlowVar(fv_name="fv_rand", <3> + pkt_offset= "IP.len", + add_val=l3_len_fix), # fix ip len + + STLVmFixIpv4(offset = "IP"), + + STLVmWrFlowVar(fv_name="fv_rand", <4> + pkt_offset= "UDP.len", + add_val=l4_len_fix) # fix udp len + ] + ) +---- +<1> Define a random variable with maximum size of the packet +<2> Trim the packet size to the fv_rand value +<3> fix ip.len +<4> fix udp.len + + +==== Tutorial 11: New Scapy header + +The following example demonstrates a way to use a header the is not supported by Scapy. 
+In this case this is VXLAN + + +file: `stl/udp_1pkt_vxlan.py` + + +[source,python] +---- + +# Adding header that does not exists yet in Scapy +# This was taken from pull request of Scapy +# + + +# RFC 7348 - Virtual eXtensible Local Area Network (VXLAN): <1> +# A Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 Networks +# http://tools.ietf.org/html/rfc7348 +_VXLAN_FLAGS = ['R' for i in range(0, 24)] + ['R', 'R', 'R', 'I', 'R', 'R', 'R', 'R', 'R'] + +class VXLAN(Packet): + name = "VXLAN" + fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS), + ThreeBytesField("vni", 0), + XByteField("reserved", 0x00)] + + def mysummary(self): + return self.sprintf("VXLAN (vni=%VXLAN.vni%)") + +bind_layers(UDP, VXLAN, dport=4789) +bind_layers(VXLAN, Ether) + + +class STLS1(object): + + def __init__ (self): + pass; + + def create_stream (self): + pkt = Ether()/IP()/UDP(sport=1337,dport=4789)/VXLAN(vni=42)/Ether()/IP()/('x'*20) <2> + #pkt.show2() + #hexdump(pkt) + + # burst of 17 packets + return STLStream(packet = STLPktBuilder(pkt = pkt ,vm = []), + mode = STLTXSingleBurst( pps = 1, total_pkts = 17) ) + + +---- +<1> Download and and add the scapy header or write it +<2> Use it + +For more information how to define headers see Scapy link:http://www.secdev.org/projects/scapy/doc/build_dissect.html[here] + + +==== Tutorial 12: Field Engine, Many clients + +The following example demonstrates a way to generate traffic from many clients with different IP/MAC to one server. +The following figure demonstrate what e want to achieve + +image::images/stl_tut_12.png[title="client->server",align="left",width=600, link="images/stl_tut_12.png"] + +1. Send gratuitous ARP from B->D with server IP/MAC +2. DUT learn the ARP of Server IP/MAC +3. 
Send traffic from A->C with many Clients IP's/MAC's + +Let's take an example: + +Base source IPv4 : 55.55.1.1 +Destination IPv4: 58.0.0.1 + +Increment src ipt portion starting at 55.55.1.1 for 'n' number of clients (55.55.1.1, 55.55.1.2) +Src MAC: start with 0000.dddd.0001, increment mac in steps of 1 +Dst MAC: Fixed - will be taken from trex_conf.yaml + +To send gratuitous ARP from TRex server side for this server (58.0.0.1) + +[source,python] +---- + + def create_stream (self): + # create a base packet and pad it to size + base_pkt = Ether(src="00:00:dd:dd:00:01",dst="ff:ff:ff:ff:ff:ff")/ARP(psrc="58.0.0.1",hwsrc="00:00:dd:dd:00:01", hwdst="00:00:dd:dd:00:01", pdst="58.0.0.1") + +---- + +Then we can send the clients traffic from A->C + + +file: `stl/udp_1pkt_range_clients_split.py` + +[source,python] +---- +class STLS1(object): + + def __init__ (self): + self.num_clients =30000; # max is 16bit + self.fsize =64 + + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether(src="00:00:dd:dd:00:01")/ + IP(src="55.55.1.1",dst="58.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + min_value=1, + max_value=self.num_clients, + size=2, op="inc"), # 1 byte varible, range 1-10 + + STLVmWrFlowVar(fv_name="mac_src", pkt_offset= 10), <1> + STLVmWrFlowVar(fv_name="mac_src" , + pkt_offset="IP.src", + offset_fixup=2), <2> + STLVmFixIpv4(offset = "IP") + ] + ,split_by_field = "mac_src" # split + ) + + return STLStream(packet = STLPktBuilder(pkt = base_pkt/pad,vm = vm), + mode = STLTXCont( pps=10 )) +---- +<1> Write the variable mac_src with offset of 10 (last 2 bytes of src_mac field) +<2> Write the variable mac_src with `offset_fixup` of 2. 
beacuse we write it with offset + + + +=== Reference + +=== Stream + +==== Packet + +==== Field Engine commands + +==== Modes + +=== Console commands + +=== Python API + + + + + + + + + + + diff --git a/images/Thumbs.db b/images/Thumbs.db index f618d2b0..309dd91e 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/stateless_objects.png b/images/stateless_objects.png new file mode 100644 index 00000000..f16924da Binary files /dev/null and b/images/stateless_objects.png differ diff --git a/images/stl_streams_example.png b/images/stl_streams_example.png new file mode 100644 index 00000000..6c10e9d2 Binary files /dev/null and b/images/stl_streams_example.png differ diff --git a/images/stl_tut_1.png b/images/stl_tut_1.png new file mode 100644 index 00000000..24aa26fc Binary files /dev/null and b/images/stl_tut_1.png differ diff --git a/images/stl_tut_12.png b/images/stl_tut_12.png new file mode 100644 index 00000000..0db7f117 Binary files /dev/null and b/images/stl_tut_12.png differ diff --git a/images/stl_tut_4.png b/images/stl_tut_4.png new file mode 100644 index 00000000..dbe95fba Binary files /dev/null and b/images/stl_tut_4.png differ diff --git a/images/trex_2.0_stateless.png b/images/trex_2.0_stateless.png new file mode 100644 index 00000000..01787f99 Binary files /dev/null and b/images/trex_2.0_stateless.png differ diff --git a/visio_drawings/trex_2.0_stateless.vsd b/visio_drawings/trex_2.0_stateless.vsd index d46f2a59..4ae6420d 100755 Binary files a/visio_drawings/trex_2.0_stateless.vsd and b/visio_drawings/trex_2.0_stateless.vsd differ diff --git a/wscript b/wscript index 28ab986d..c510d721 100755 --- a/wscript +++ b/wscript @@ -178,6 +178,9 @@ def build(bld): bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a docinfo 
-a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.html', scan=ascii_doc_scan) + bld(rule=convert_to_pdf_book, source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 04839f3d4ad5e6de0f3260bb726e70482d035193 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 2 Mar 2016 13:39:26 +0200 Subject: minor --- draft_trex_stateless.asciidoc | 3 +-- images/trex_2_stateless.png | Bin 0 -> 1081192 bytes wscript | 3 +++ 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 images/trex_2_stateless.png diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index bc5882cd..a9195d46 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -61,7 +61,7 @@ To support interactive mode, JSON-RPC2 server added to the Control Plane The following diagram illustrates the RPC server component's -image::images/trex_2.0_stateless.png[title="RPC Server Position",align="left",width=800, link="images/trex_2.0_stateless.png"] +image::images/trex_2_stateless.png[title="RPC Server Position",align="left",width=800, link="images/trex_2_stateless.png"] * The Control transport protocol is ZMQ working in REQ/RES mode * JSON-RPC2 is the RPC protocol on top of the ZMQ REQ/RES @@ -80,7 +80,6 @@ This Architecture provides the following advantages: === Objects - image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600, link="images/stateless_objects.png"] * *TRex*: Each TRex instance, includes a number of interfaces diff --git a/images/trex_2_stateless.png b/images/trex_2_stateless.png new file mode 100644 index 00000000..01787f99 Binary files /dev/null and b/images/trex_2_stateless.png differ diff --git a/wscript b/wscript index c510d721..f7e5024d 100755 --- a/wscript +++ b/wscript @@ -183,6 +183,9 @@ def build(bld): 
bld(rule=convert_to_pdf_book, source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book, + source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.pdf', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book, -- cgit 1.2.3-korg From 80f774ac6be4e895466488065ca0bdeed7404b7a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 2 Mar 2016 08:12:45 +0200 Subject: more stateless doc --- draft_trex_stateless.asciidoc | 509 +++++++++++++++++++++++++++++++++++++++--- images/Thumbs.db | Bin 518144 -> 552960 bytes images/stl_tut_pcap_file1.png | Bin 0 -> 3800 bytes 3 files changed, 479 insertions(+), 30 deletions(-) create mode 100644 images/stl_tut_pcap_file1.png diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index a9195d46..7a202c34 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -7,7 +7,7 @@ TRex :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex -:toclevels: 4 +:toclevels: 6 == Stateless support @@ -19,7 +19,7 @@ TRex * Interface can configured with multi traffic profiles * Profile can support multi streams. Scale to 10K streams in parallel * Each Stream -** Packet template - ability to build any packet using Scapy (e.g. MPLS/Ipv4/Ipv6/GRE/VXLAN/NSH) +** Packet template - ability to build any packet using Scapy (e.g. MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) ** Field engine program *** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 *** Ability to change the packet size (e.g. Random packet size 64-9K) @@ -97,7 +97,7 @@ image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600 This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. 
-==== Tutorial 1: Simple Ipv4/UDP packet - Simulator +==== Tutorial 1: Simple IPv4/UDP packet - Simulator The following example demonstrates the most basic use case using our simulator. @@ -305,7 +305,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt 0030 78 78 78 78 xxxx ---- -==== Tutorial 2: Simple Ipv4/UDP packet - TRex +==== Tutorial 2: Simple IPv4/UDP packet - TRex ===== Run TRex as a server mode @@ -446,7 +446,7 @@ Port Statistics ---- -==== Tutorial 3: Simple Ipv4/UDP packet +==== Tutorial 3: Simple IPv4/UDP packet The following example demonstrates @@ -666,14 +666,14 @@ file: `stl/imix.py` The following example demonstrates changing packet fields. The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to get 100% flexiable in the cost of performance. -The FE can allocate variable in Stream context. Write a variable to a packet offset, change packet size etc. +The FE can allocate stream variable in Stream context. Write a stream variable to a packet offset, change packet size etc. *Some examples for what can be done:* * Change ipv4.tos 1-10 * Change packet size to be random in range 64-9K * Create range of flows (change src_ip,dest_ip,src_port,dest_port) -* Update Ipv4 checksum +* Update IPv4 checksum for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] @@ -718,10 +718,10 @@ file: `stl/syn_attack.py` mode = STLTXCont()) ---- <1> Create SYN packet using Scapy -<2> Define variable name=ip_src, 4 bytes size for IPv4. -<3> Define variable name=src_port, 2 bytes size for port. +<2> Define stream variable name=ip_src, 4 bytes size for IPv4. +<3> Define stream variable name=src_port, 2 bytes size for port. <4> Write ip_src var into `IP.src` packet offset. Scapy calculate the offset. We could gave `IP:1.src" for second IP header in the packet -<5> Fix Ipv4 checksum. 
here we provide the header name `IP` we could gave `IP:1` for second IP +<5> Fix IPv4 checksum. here we provide the header name `IP` we could gave `IP:1` for second IP <6> Update TCP src port- TCP checksum is not updated here WARNING: Original Scapy does not have the capability to calculate offset for a header/field by name. This offset capability won't work for all the cases because there could be complex cases that Scapy rebuild the header. In such cases put offset as a number @@ -729,22 +729,22 @@ WARNING: Original Scapy does not have the capability to calculate offset for a h The output pcap file field can be seen here .Pcap file output -[format="csv",cols="1^,2^,1^", options="header"] +[format="csv",cols="1^,2^,2^", options="header",width="40%"] |================= pkt,Client IPv4,Client Port - 1 , 17.152.71.218 , 5814 - 2 , 17.7.6.30 , 26810 - 3 , 17.3.32.200 , 1810 + 1 , 17.152.71.218 , 5814 + 2 , 17.7.6.30 , 26810 + 3 , 17.3.32.200 , 1810 4 , 17.135.236.168 , 55810 - 5 , 17.46.240.12 , 1078 - 6 , 16.133.91.247, 2323 + 5 , 17.46.240.12 , 1078 + 6 , 16.133.91.247 , 2323 |================= ==== Tutorial 8: Field Engine, Tuple Generator The following example demonstrates creating multiply flow from the same packet template. -The TupleGenerator instructions are used to create two variables with IP, port +The TupleGenerator instructions are used to create two stream variables with IP, port file: `stl/udp_1pkt_tuple_gen.py` @@ -770,12 +770,12 @@ file: `stl/udp_1pkt_tuple_gen.py` vm = vm) ---- <1> Define struct with two dependent varibles tuple.ip tuple.port -<2> Write tuple.ip to Ipv4 src field offset +<2> Write tuple.ip to IPv4 src field offset <3> Write tuple.port to UDP header. 
You should set UDP.checksum to zero .Pcap file output -[format="csv",cols="1^,2^,1^", options="header"] +[format="csv",cols="1^,2^,1^", options="header",width="40%"] |================= pkt,Client IPv4,Client Port 1 , 16.0.0.1 , 1025 @@ -788,11 +788,11 @@ pkt,Client IPv4,Client Port * Number of clients are two 16.0.0.1 and 16.0.0.2 * Number of flows is limited to 129020 (2*65535-1025) -* The variable size should match the size of the FlowVarWr instruction +* The stream variable size should match the size of the FlowVarWr instruction ==== Tutorial 9: Field Engine, write to a bit-field packet -The following example demonstrates a way to write a variable to a bit field packet variables. +The following example demonstrates a way to write a stream variable to a bit field packet variables. In this example MPLS label field will be changed. .MPLS header @@ -832,7 +832,7 @@ file: `stl/udp_1pkt_mpls_vm.py` ---- <1> Define varible size of 2 bytes -<2> Write the variable label with a shift of 12 bits and with 20bit MSB mask. Cast the variables of 2 bytes to 4 bytes +<2> Write the stream variable label with a shift of 12 bits and with 20bit MSB mask. 
Cast the stream variables of 2 bytes to 4 bytes <3> Second MPLS header should be changed @@ -882,7 +882,7 @@ file: `stl/udp_rand_len_9k.py` ] ) ---- -<1> Define a random variable with maximum size of the packet +<1> Define a random stream variable with maximum size of the packet <2> Trim the packet size to the fv_rand value <3> fix ip.len <4> fix udp.len @@ -969,15 +969,17 @@ To send gratuitous ARP from TRex server side for this server (58.0.0.1) [source,python] ---- - def create_stream (self): # create a base packet and pad it to size - base_pkt = Ether(src="00:00:dd:dd:00:01",dst="ff:ff:ff:ff:ff:ff")/ARP(psrc="58.0.0.1",hwsrc="00:00:dd:dd:00:01", hwdst="00:00:dd:dd:00:01", pdst="58.0.0.1") - + base_pkt = Ether(src="00:00:dd:dd:00:01", + dst="ff:ff:ff:ff:ff:ff")/ + ARP(psrc="58.0.0.1", + hwsrc="00:00:dd:dd:00:01", + hwdst="00:00:dd:dd:00:01", + pdst="58.0.0.1") ---- -Then we can send the clients traffic from A->C - +Then traffic can be sent from client side A->C file: `stl/udp_1pkt_range_clients_split.py` @@ -1014,8 +1016,455 @@ class STLS1(object): return STLStream(packet = STLPktBuilder(pkt = base_pkt/pad,vm = vm), mode = STLTXCont( pps=10 )) ---- -<1> Write the variable mac_src with offset of 10 (last 2 bytes of src_mac field) -<2> Write the variable mac_src with `offset_fixup` of 2. beacuse we write it with offset +<1> Write the stream variable mac_src with offset of 10 (last 2 bytes of src_mac field) +<2> Write the stream variable mac_src with `offset_fixup` of 2. beacuse we write it with offset + + +==== Tutorial 12: Field Engine, Split to core + +The following example demonstrates a way to split generated traffic to a number of threads. +Using this feature, there is a way to specify by each field to split the traffic to threads. +Without this feature the traffic is duplicated and all the threads transmits the same traffic. 
+ +===== Without Split + +Let's assume we have two transmitters DP threads + +[source,python] +---- + def create_stream (self): + + # TCP SYN + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") + + + # vm + vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + min_value="16.0.0.0", + max_value="16.0.0.254", + size=4, op="inc"), <1> + + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), <2> + + STLVmFixIpv4(offset = "IP"), # fix checksum + ] + + ) + +---- +<1> Stream variable +<2> write it to IPv4.src + + +.Variable per thread +[format="csv",cols="1^,3^,3^", options="header",width="40%"] +|================= +pkt, thread-0 ip_src,thread-1 ip_src + 1 , 16.0.0.1 , 16.0.0.1 + 2 , 16.0.0.2 , 16.0.0.2 + 3 , 16.0.0.3 , 16.0.0.3 + 4 , 16.0.0.4 , 16.0.0.4 + 5 , 16.0.0.5 , 16.0.0.5 + 6 , 16.0.0.6, 16.0.0.6 +|================= + +* In this case all the threads transmit the same packets + + +===== With Split feature + +Let's assume we have two transmitters DP threads + +[source,python] +---- + def create_stream (self): + + # TCP SYN + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") + + + # vm + vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + min_value="16.0.0.0", + max_value="16.0.0.254", + size=4, op="inc"), + + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), + + STLVmFixIpv4(offset = "IP"), # fix checksum + ] + ,split_by_field = "ip_src" <1> + ) + +---- +<1> The same example but now we with split by `ip_src` stream variable + +.Variable per thread +[format="csv",cols="1^,3^,3^", options="header",width="40%"] +|================= +pkt, thread-0 ip_src ,thread-1 ip_src + 1 , 16.0.0.1 , 16.0.0.128 + 2 , 16.0.0.2 , 16.0.0.129 + 3 , 16.0.0.3 , 16.0.0.130 + 4 , 16.0.0.4 , 16.0.0.131 + 5 , 16.0.0.5 , 16.0.0.132 + 6 , 16.0.0.6, 16.0.0.133 +|================= + +* In this case the stream variable is split + +To simulate it you can run the following command, let's take the file `stl/udp_1pkt_range_clients_split.py` and simulate it + 
+[source,bash] +---- +$./stl-sim -f stl/udp_1pkt_range_clients_split.py -o a.pcap -c 2 -l 10 #<1> +---- +<1> simulate 2 threads -c 2 + + +.Variable per thread +[format="csv",cols="1^,3^,3^", options="header",width="40%"] +|================= +pkt, thread-0 ip_src,thread-1 ip_src + 1 , 55.55.0.1 , 55.55.58.153 + 2 , 55.55.0.2 , 55.55.58.154 + 3 , 55.55.0.3 , 55.55.58.155 + 4 , 55.55.0.4 , 55.55.58.156 + 5 , 55.55.0.5 , 55.55.58.157 + 6 , 55.55.0.6 , 55.55.58.158 +|================= + + + +===== Some rules about Split stream varibles and burst/multi-burst + +* In case of burst/multi-burst the number of packets are split to number of threads in *default* there is no need an explict split +* When the number of packets in a burst is smaller than the number of threads only one thread will do the work. +* In case there is stream with burst of *1* packet, only the first DP thread will do the work. + + +==== Tutorial 13: Pcap file to *one* stream + +There is a way to load *one* packet data into a stream. There is an assumption that this pcap. only the first packet from this pcap is taken. + +file: `stl/udp_1pkt_pcap.py` + +[source,python] +---- + + def get_streams (self, direction = 0): + return [STLStream(packet = + STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), # path relative to pwd <1> + mode = STLTXCont(pps=10)) ] + +---- +<1> packet is taken from pcap file relative to pwd of the script you run + + +file: `stl/udp_1pkt_pcap_relative_path.py` + + +[source,python] +---- + + def get_streams (self, direction = 0): + return [STLStream(packet = STLPktBuilder(pkt ="yaml/udp_64B_no_crc.pcap", + path_relative_to_profile = True), <1> + mode = STLTXCont(pps=10)) ] + +---- +<1> packet is taken from pcap file relative to *profile* file location + +==== Tutorial 14: Pcap file to many streams + +The following example demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. 
+ +file: `stl/pcap.py` + +[source,python] +---- + def get_streams (self, + ipg_usec = 10.0, <1> + loop_count = 1): <2> + + profile = STLProfile.load_pcap(self.pcap_file, <3> + ipg_usec = ipg_usec, + loop_count = loop_count) +---- +<1> The inter stream gap in usec +<2> How many times to loop +<3> the pcap file + + +image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width=300, link="images/stl_tut_pcap_file1.png"] + +This figure illustrates how the streams look like for pcap file with 3 packet. +* Each stream is configured to burst with one packet +* Each stream point to the next stream. +* The last stream point to the first with action_loop=loop_count in case it was asked (>1) +The profile will run on only one DP thread because it has burst with one packet (see Split example) + +Running this example + +[source,bash] +---- +./stl-sim -f stl/pcap.py --yaml +---- + +will give this + +[source,python] +---- +csi-kiwi-02]> ./stl-sim -f stl/pcap.py --yaml +- name: 1 + next: 2 <1> + stream: + action_count: 0 + enabled: true + flags: 0 + isg: 10.0 + mode: + percentage: 100 + total_pkts: 1 + type: single_burst + packet: + meta: '' + rx_stats: + enabled: false + self_start: true + vm: + instructions: [] + split_by_var: '' +- name: 2 + next: 3 + stream: + action_count: 0 + enabled: true + flags: 0 + isg: 10.0 + mode: + percentage: 100 + total_pkts: 1 + type: single_burst + packet: + meta: '' + rx_stats: + enabled: false + self_start: false + vm: + instructions: [] + split_by_var: '' +- name: 3 + next: 4 + stream: + action_count: 0 + enabled: true + flags: 0 + isg: 10.0 + mode: + percentage: 100 + total_pkts: 1 + type: single_burst + packet: + meta: '' + rx_stats: + enabled: false + self_start: false + vm: + instructions: [] + split_by_var: '' +- name: 4 + next: 5 + stream: + action_count: 0 + enabled: true + flags: 0 + isg: 10.0 + mode: + percentage: 100 + total_pkts: 1 + type: single_burst + packet: + meta: '' + rx_stats: + enabled: false + self_start: false + 
vm: + instructions: [] + split_by_var: '' +- name: 5 + next: 1 <2> + stream: + action_count: 1 <3> + enabled: true + flags: 0 + isg: 10.0 + mode: + percentage: 100 + total_pkts: 1 + type: single_burst + packet: + meta: '' + rx_stats: + enabled: false + self_start: false <4> + vm: + instructions: [] + split_by_var: '' +---- +<1> each stream point to the next stream +<2> last point to the first +<3> the number of loop is given in `action_count: 1` +<4> self_start is disabled for all the streams except the first one + + +==== Tutorial 15: Pcap file to many streams and Field Engine + +The following example demonstrates a way to load pcap file to many stream and attach to each stream a Field Engine program. +For example change the IP.src of all the streams to a random number + +file: `stl/pcap_with_vm.py` + +[source,python] +---- + + def create_vm (self, ip_src_range, ip_dst_range): + if not ip_src_range and not ip_dst_range: + return None + + # until the feature of offsets will be fixed for PCAP use hard coded offsets + + vm = [] + + if ip_src_range: + vm += [STLVmFlowVar(name="src", + min_value = ip_src_range['start'], + max_value = ip_src_range['end'], + size = 4, op = "inc"), + #STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src") + STLVmWrFlowVar(fv_name="src",pkt_offset = 26) + ] + + if ip_dst_range: + vm += [STLVmFlowVar(name="dst", + min_value = ip_dst_range['start'], + max_value = ip_dst_range['end'], + size = 4, op = "inc"), + + #STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst") + STLVmWrFlowVar(fv_name="dst",pkt_offset = 30) + ] + + vm += [#STLVmFixIpv4(offset = "IP") + STLVmFixIpv4(offset = 14) + ] + + return vm + + + def get_streams (self, + ipg_usec = 10.0, + loop_count = 5, + ip_src_range = None, + ip_dst_range = {'start' : '10.0.0.1', + 'end': '10.0.0.254'}): + + vm = self.create_vm(ip_src_range, ip_dst_range) <1> + profile = STLProfile.load_pcap(self.pcap_file, + ipg_usec = ipg_usec, + loop_count = loop_count, + vm = vm) <2> + + return 
profile.get_streams() +---- +<1> Create Field Engine program, +<2> Apply to all the packets -> convert to streams + +.Output +[format="csv",cols="1^,2^,1^", options="header",width="40%"] +|================= +pkt, IPv4 , flow + 1 , 10.0.0.1, 1 + 2 , 10.0.0.1, 1 + 3 , 10.0.0.1, 1 + 4 , 10.0.0.1, 1 + 5 , 10.0.0.1, 1 + 6 , 10.0.0.1, 1 + 7 , 10.0.0.2, 2 + 8 , 10.0.0.2, 2 + 9 , 10.0.0.2, 2 + 10 , 10.0.0.2,2 + 11 , 10.0.0.2,2 + 12 , 10.0.0.2,2 +|================= + + +==== Tutorial 16: Source and Destination MAC address + +Each TRex port has a source MAC configure and destination MAC (DUT) configured in /etc/trex_cfg.yaml +By default those MAC (source and destination) is taken +In case a user configures a source or destination MAC explicitly this MAC will override + + +.MAC addrees +[format="csv",cols="2^,2^,2^", options="header",width="40%"] +|================= +Scapy , Source MAC,Destination MAC +Ether() , trex_cfg,trex_cfg +Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg +Ether(dst="00:bb:12:34:56:01"),trex_cfg,"00:bb:12:34:56:01" +|================= + +For example + +file: `stl/udp_1pkt_1mac_override.py` + + +[source,python] +---- + def create_stream (self): + + base_pkt = Ether(src="00:bb:12:34:56:01")/ <1> + IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025) +---- +<1> Don't take TRex port src interface MAC + +==== Tutorial 17: Teredo tunnel (IPv6 over IPv4) + +The following example demonstrates creating IPv6 packet inside IPv4 packet and create a range of IPs + +file: `stl/udp_1pkt_ipv6_in_ipv4.py` + +[source,python] +---- + def create_stream (self): + # Teredo Ipv6 over Ipv4 + pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=3797,sport=3544)/ + IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815", + src="2001:4860:0:2001::68")/ + UDP(dport=12,sport=1025)/ICMPv6Unknown() + + vm = CTRexScRaw( [ + # tuple gen for inner Ipv6 + STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2", + port_min=1025, port_max=65535, + name="tuple"), <1> + + 
STLVmWrFlowVar (fv_name="tuple.ip", + pkt_offset= "IPv6.src", + offset_fixup=12 ), <2> + STLVmWrFlowVar (fv_name="tuple.port", + pkt_offset= "UDP:1.sport" ) <3> + ] + ) +---- +<1> Define stream struct name tuple. it has tuple.ip, tuple.port variables +<2> Write stream tuple.ip variable into IPv6.src offset and fixup with 12 bytes (only 4 LSB) +<3> Write stream tuple.port variable into the second UDP header diff --git a/images/Thumbs.db b/images/Thumbs.db index 309dd91e..175b366f 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/stl_tut_pcap_file1.png b/images/stl_tut_pcap_file1.png new file mode 100644 index 00000000..1e4be64e Binary files /dev/null and b/images/stl_tut_pcap_file1.png differ -- cgit 1.2.3-korg From 05ce5bae1c2fc57dc7a7b468458e26d763990c60 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 11:46:36 +0200 Subject: add API sample --- draft_trex_stateless.asciidoc | 565 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 562 insertions(+), 3 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 7a202c34..be7295b2 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -796,7 +796,7 @@ The following example demonstrates a way to write a stream variable to a bit fie In this example MPLS label field will be changed. .MPLS header -[cols="32", halign="center"] +[cols="32", halign="center",width="50%"] |==== 20+<|Label 3+<|TC 1+<|S 8+<|TTL| 0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1| @@ -1217,7 +1217,7 @@ will give this [source,python] ---- -csi-kiwi-02]> ./stl-sim -f stl/pcap.py --yaml +$./stl-sim -f stl/pcap.py --yaml - name: 1 next: 2 <1> stream: @@ -1467,6 +1467,421 @@ file: `stl/udp_1pkt_ipv6_in_ipv4.py` <3> Write stream tuple.port variable into the second UDP header +==== Tutorial 18: Mask instruction + +The STLVmWrMaskFlowVar is a handy command. 
The pseudocode is a folow + +.Pseudocode +[source,bash] +---- + uint32_t val=(cast_to_size)rd_from_varible("name"); # read flow-var + val+=m_add_value; # add value + + if (m_shift>0) { # shift + val=val<>(-m_shift); + } + } + + pkt_val=rd_from_pkt(pkt_offset) # RMW + pkt_val = (pkt_val & ~m_mask) | (val & m_mask) + wr_to_pkt(pkt_offset,pkt_val) +---- + + +===== Example 1 + +[source,python] +---- + vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + min_value=1, + max_value=30, + size=2, op="dec",step=1), + STLVmWrMaskFlowVar(fv_name="mac_src", + pkt_offset= 11, + pkt_cast_size=1, + mask=0xff) # mask command ->write it as one byte + ] + ) + +---- + +This will cast stream variable with 2 byte to be 1 byte + +===== Example 2 + +[source,python] +---- + + vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + min_value=1, + max_value=30, + size=2, op="dec",step=1), + STLVmWrMaskFlowVar(fv_name="mac_src", + pkt_offset= 10, + pkt_cast_size=2, + mask=0xff00, + shift=8) # take the var shift it 8 (x256) write only to LSB + ] + ) +---- + +The output will be shift by 8 + +.Output +[format="csv",cols="1^", options="header",width="20%"] +|================= + value + 0x0100 + 0x0200 + 0x0300 +|================= + +===== Example 3 + +[source,python] +---- + vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + min_value=1, + max_value=30, + size=2, + op="dec",step=1), + STLVmWrMaskFlowVar(fv_name="mac_src", + pkt_offset= 10, + pkt_cast_size=1, + mask=0x1, + shift=-1) <1> + ] + ) + +---- +<1> take var mac_src>>1 and write the LSB every two packet there should be a change + +.Output +[format="csv",cols="1^", options="header",width="20%"] +|================= +value + 0x00 + 0x00 + 0x01 + 0x01 + 0x00 + 0x00 + 0x01 + 0x01 +|================= + + +=== Tutorials HLT profile + +HLTAPI is a Cisco standard API for traffic generation.IXIA and Spirent support this standard. traffic_config API has set of arguments for specifying the packet, how to send it and what field to change while sending it. 
+We created a Python module that you can specify the traffic profile in HLT like format and load it as native profile for smooth transition . +Under the hood there is a compiler that converts it to native scapy/field engine instruction +The support is limited to [TBD] this argument. + + +file: `stl/hlt/hlt_udp_inc_dec_len_9k.py` + +[source,python] +---- + +class STLS1(object): + ''' + Create 2 Eth/IP/UDP steams with different packet size: + First stream will start from 64 bytes (default) and will increase until max_size (9,216) + Seconds stream will decrease the packet size in reverse way + ''' + + def create_streams (self): + max_size = 9*1024 + return [STLHltStream(length_mode = 'increment', + frame_size_max = max_size, + l3_protocol = 'ipv4', + ip_src_addr = '16.0.0.1', + ip_dst_addr = '48.0.0.1', + l4_protocol = 'udp', + udp_src_port = 1025, + udp_dst_port = 12, + rate_pps = 1, + ), + STLHltStream(length_mode = 'decrement', + frame_size_max = max_size, + l3_protocol = 'ipv4', + ip_src_addr = '16.0.0.1', + ip_dst_addr = '48.0.0.1', + l4_protocol = 'udp', + udp_src_port = 1025, + udp_dst_port = 12, + rate_pps = 1, + ) + ] + + def get_streams (self, direction = 0): + return self.create_streams() +---- + +This profile can be run with the simulator to generate pcap file + +[source,bash] +---- +$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py -o b.pcap -l 10 +---- + +It can be converted to native json or YAML + +[source,bash] +---- +$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --josn +---- + +or converted to native Python native Scapy/FE using this command + +[source,bash] +---- +$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --native +---- + +to run it using using the TRex Console + +[source,bash] +---- +TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a +---- + + +more profiles and example can be found in `stl/hlt` folder + + +=== Tutorials Native Python API + +==== Tutorial 1: + +Python API examples are located here: 
`automation/trex_control_plane/stl/examples` +Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib` + +The Console is using the library to interact with TRex server and protocol is JSON-RPC2 over ZMQ + +file: `stl_bi_dir_flows.py` + + +[source,python] +---- + +def simple_burst (): + + # create client + c = STLClient() + passed = True + + try: + # turn this on for some information + #c.set_verbose("high") + + # create two streams + s1 = STLStream(packet = create_pkt(200, 0), + mode = STLTXCont(pps = 100)) + + # second stream with a phase of 1ms (inter stream gap) + s2 = STLStream(packet = create_pkt(200, 1), + isg = 1000, + mode = STLTXCont(pps = 100)) + + + # connect to server + c.connect() + + # prepare our ports (my machine has 0 <--> 1 with static route) + c.reset(ports = [0, 1]) # it will Acquire port 0,1 + + # add both streams to ports + c.add_streams(s1, ports = [0]) + c.add_streams(s2, ports = [1]) + + # clear the stats before injecting + c.clear_stats() + + # choose rate and start traffic for 10 seconds on 5 mpps + print "Running 5 Mpps on ports 0, 1 for 10 seconds..." 
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10) <1> + + # block until done + c.wait_on_traffic(ports = [0, 1]) + + # read the stats after the test + stats = c.get_stats() + + print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True) + print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True) + + lost_a = stats[0]["opackets"] - stats[1]["ipackets"] + lost_b = stats[1]["opackets"] - stats[0]["ipackets"] + + print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a) + print "packets lost from 1 --> 0: {0} pkts".format(lost_b) + + if (lost_a == 0) and (lost_b == 0): + passed = True + else: + passed = False + + except STLError as e: + passed = False + print e + + finally: + c.disconnect() + + if passed: + print "\nTest has passed :-)\n" + else: + print "\nTest has failed :-(\n" + + +# run the tests +simple_burst() +---- +<1> Start can work on mask of ports + + +=== Tutorials HLT Python API + + + +HLT Python API is a layer on top the native layer. 
it support + +* Device Control +** connect +** cleanup_session +** device_info +** info +* Interface +** interface_config +** interface_stats +* Traffic +** traffic_config - not all arguments are supported +** traffic_control +** traffic_stats + + +file: `hlt_udp_simple.py` + + +[source,python] +---- + +import sys +import argparse +import stl_path +from trex_stl_lib.api import * <1> +from trex_stl_lib.trex_stl_hltapi import * <2> + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(usage=""" + Connect to TRex and send burst of packets + + examples + + hlt_udp_simple.py -s 9000 -d 30 + + hlt_udp_simple.py -s 9000 -d 30 -rate_percent 10 + + hlt_udp_simple.py -s 300 -d 30 -rate_pps 5000000 + + hlt_udp_simple.py -s 800 -d 30 -rate_bps 500000000 --debug + + then run the simulator on the output + ./stl-sim -f example.yaml -o a.pcap ==> a.pcap include the packet + + """, + description="Example for TRex HLTAPI", + epilog=" based on hhaim's stl_run_udp_simple example"); + + parser.add_argument("--ip", + dest="ip", + help='Remote trex ip', + default="127.0.0.1", + type = str) + + parser.add_argument("-s", "--frame-size", + dest="frame_size", + help='L2 frame size in bytes without FCS', + default=60, + type = int,) + + parser.add_argument('-d','--duration', + dest='duration', + help='duration in second ', + default=10, + type = int,) + + parser.add_argument('--rate-pps', + dest='rate_pps', + help='speed in pps', + default="100") + + parser.add_argument('--src', + dest='src_mac', + help='src MAC', + default='00:50:56:b9:de:75') + + parser.add_argument('--dst', + dest='dst_mac', + help='dst MAC', + default='00:50:56:b9:34:f3') + + args = parser.parse_args(); + + hltapi = CTRexHltApi() + print 'Connecting to TRex' + res = hltapi.connect(device = args.ip, port_list = [0, 1], reset = True, break_locks = True) + check_res(res) + ports = res['port_handle'] + if len(ports) < 2: + error('Should have at least 2 ports for this test') + print 'Connected, acquired ports: %s' 
% ports + + print 'Creating traffic' + + res = hltapi.traffic_config(mode = 'create', bidirectional = True, + port_handle = ports[0], port_handle2 = ports[1], + frame_size = args.frame_size, + mac_src = args.src_mac, mac_dst = args.dst_mac, + mac_src2 = args.dst_mac, mac_dst2 = args.src_mac, + l3_protocol = 'ipv4', + ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254, + ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254, + l4_protocol = 'udp', + udp_dst_port = 12, udp_src_port = 1025, + stream_id = 1, # temporary workaround, add_stream does not return stream_id + rate_pps = args.rate_pps, + ) + check_res(res) + + print 'Starting traffic' + res = hltapi.traffic_control(action = 'run', port_handle = ports[:2]) + check_res(res) + wait_with_progress(args.duration) + + print 'Stopping traffic' + res = hltapi.traffic_control(action = 'stop', port_handle = ports[:2]) + check_res(res) + + res = hltapi.traffic_stats(mode = 'aggregate', port_handle = ports[:2]) + check_res(res) + print_brief_stats(res) + + res = hltapi.cleanup_session(port_handle = 'all') + check_res(res) + + print 'Done' +---- +<1> import Native TRex API +<2> import HLT TRex + + === Reference @@ -1480,9 +1895,153 @@ file: `stl/udp_1pkt_ipv6_in_ipv4.py` === Console commands -=== Python API +=== Appendix + + +==== HLT supported Arguments +[source,python] +---- + +traffic_config_kwargs = { + 'mode': None, # ( create | modify | remove | reset ) + 'split_by_cores': 'split', # ( split | duplicate | single ) TRex extention: split = split traffic by cores, duplicate = duplicate traffic for all cores, single = run only with sinle core (not implemented yet) + 'consistent_random': False, # TRex extention: False (default): random sequence will be different every run, True: random sequence will be same every run + 'port_handle': None, + 'port_handle2': None, + # stream builder parameters + 'transmit_mode': 'continuous', # ( continuous | multi_burst | single_burst ) + 'rate_pps': 
None, + 'rate_bps': None, + 'rate_percent': 10, + 'stream_id': None, + 'name': None, + 'bidirectional': 0, + 'direction': 0, # ( 0 | 1 ) TRex extention: 1 = exchange sources and destinations + 'pkts_per_burst': 1, + 'burst_loop_count': 1, + 'inter_burst_gap': 12, + 'length_mode': 'fixed', # ( auto | fixed | increment | decrement | random | imix ) + 'l3_imix1_size': 60, + 'l3_imix1_ratio': 28, + 'l3_imix2_size': 590, + 'l3_imix2_ratio': 20, + 'l3_imix3_size': 1514, + 'l3_imix3_ratio': 4, + 'l3_imix4_size': 9226, + 'l3_imix4_ratio': 0, + #L2 + 'frame_size': 64, + 'frame_size_min': 64, + 'frame_size_max': 64, + 'frame_size_step': 1, + 'l2_encap': 'ethernet_ii', # ( ethernet_ii | ethernet_ii_vlan ) + 'mac_src': '00:00:01:00:00:01', + 'mac_dst': '00:00:00:00:00:00', + 'mac_src2': '00:00:01:00:00:01', + 'mac_dst2': '00:00:00:00:00:00', + 'mac_src_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'mac_src_step': 1, + 'mac_src_count': 1, + 'mac_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'mac_dst_step': 1, + 'mac_dst_count': 1, + 'mac_src2_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'mac_src2_step': 1, + 'mac_src2_count': 1, + 'mac_dst2_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'mac_dst2_step': 1, + 'mac_dst2_count': 1, + # vlan options below can have multiple values for nested Dot1Q headers + 'vlan_user_priority': 1, + 'vlan_priority_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'vlan_priority_count': 1, + 'vlan_priority_step': 1, + 'vlan_id': 0, + 'vlan_id_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'vlan_id_count': 1, + 'vlan_id_step': 1, + 'vlan_cfi': 1, + 'vlan_protocol_tag_id': None, + #L3, general + 'l3_protocol': None, # ( ipv4 | ipv6 ) + 'l3_length_min': 110, + 'l3_length_max': 238, + 'l3_length_step': 1, + #L3, IPv4 + 'ip_precedence': 0, + 'ip_tos_field': 0, + 'ip_mbz': 0, + 'ip_delay': 0, + 'ip_throughput': 0, + 'ip_reliability': 0, + 'ip_cost': 0, + 
'ip_reserved': 0, + 'ip_dscp': 0, + 'ip_cu': 0, + 'l3_length': None, + 'ip_id': 0, + 'ip_fragment_offset': 0, + 'ip_ttl': 64, + 'ip_checksum': None, + 'ip_src_addr': '0.0.0.0', + 'ip_dst_addr': '192.0.0.1', + 'ip_src_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'ip_src_step': 1, # ip or number + 'ip_src_count': 1, + 'ip_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'ip_dst_step': 1, # ip or number + 'ip_dst_count': 1, + #L3, IPv6 + 'ipv6_traffic_class': 0, + 'ipv6_flow_label': 0, + 'ipv6_length': None, + 'ipv6_next_header': None, + 'ipv6_hop_limit': 64, + 'ipv6_src_addr': 'fe80:0:0:0:0:0:0:12', + 'ipv6_dst_addr': 'fe80:0:0:0:0:0:0:22', + 'ipv6_src_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'ipv6_src_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number + 'ipv6_src_count': 1, + 'ipv6_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) + 'ipv6_dst_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number + 'ipv6_dst_count': 1, + #L4, TCP + 'l4_protocol': None, # ( tcp | udp ) + 'tcp_src_port': 1024, + 'tcp_dst_port': 80, + 'tcp_seq_num': 1, + 'tcp_ack_num': 1, + 'tcp_data_offset': 5, + 'tcp_fin_flag': 0, + 'tcp_syn_flag': 0, + 'tcp_rst_flag': 0, + 'tcp_psh_flag': 0, + 'tcp_ack_flag': 0, + 'tcp_urg_flag': 0, + 'tcp_window': 4069, + 'tcp_checksum': None, + 'tcp_urgent_ptr': 0, + 'tcp_src_port_mode': 'increment', # ( increment | decrement | random ) + 'tcp_src_port_step': 1, + 'tcp_src_port_count': 1, + 'tcp_dst_port_mode': 'increment', # ( increment | decrement | random ) + 'tcp_dst_port_step': 1, + 'tcp_dst_port_count': 1, + # L4, UDP + 'udp_src_port': 1024, + 'udp_dst_port': 80, + 'udp_length': None, + 'udp_dst_port_mode': 'increment', # ( increment | decrement | random ) + 'udp_src_port_step': 1, + 'udp_src_port_count': 1, + 'udp_src_port_mode': 'increment', # ( increment | decrement | random ) + 'udp_dst_port_step': 1, + 'udp_dst_port_count': 1, +} + +---- -- cgit 
1.2.3-korg From abf927bcfde6e5cf11b54cbfab233daf84d649f1 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 18:05:14 +0200 Subject: add more info --- draft_trex_stateless.asciidoc | 767 ++++++++++++++++++++++++++++++++-- images/Thumbs.db | Bin 552960 -> 561152 bytes images/stl_barrier.png | Bin 0 -> 26139 bytes images/stl_null_stream.png | Bin 0 -> 2843 bytes images/trex_2_stateless.png | Bin 1081192 -> 44004 bytes images/trex_stateless_multi_user.png | Bin 0 -> 64465 bytes visio_drawings/trex_2.0_stateless.vsd | Bin 966656 -> 1536000 bytes 7 files changed, 735 insertions(+), 32 deletions(-) create mode 100644 images/stl_barrier.png create mode 100644 images/stl_null_stream.png create mode 100644 images/trex_stateless_multi_user.png diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index be7295b2..90bf72c0 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -10,7 +10,7 @@ TRex :toclevels: 6 -== Stateless support +== Stateless support (Alpha stage) === High level functionality @@ -23,7 +23,6 @@ TRex ** Field engine program *** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 *** Ability to change the packet size (e.g. Random packet size 64-9K) - ** Mode -Continues/Burst/Multi burst support ** Rate can be specified in: *** Packet per second -(e.g. 14MPPS) @@ -57,7 +56,7 @@ image::images/stl_streams_example.png[title="Streams example",align="left",width === RPC Architecture -To support interactive mode, JSON-RPC2 server added to the Control Plane +To support interactive mode, JSON-RPC2 thread added to the TRex Control Plane core. The following diagram illustrates the RPC server component's @@ -68,15 +67,23 @@ image::images/trex_2_stateless.png[title="RPC Server Position",align="left",widt * Async transport is ZMQ working SUB/PUB mode. It is for async event such as interface change mode, counters etc. 
* Python is the first Client to implement the Python automation API * Console utilizes the Python API to implement a user interface to TRex +* Number of users can control one TRex server in parallel as long as they control different Interfaces. TRex Interface can be acquired by a user. For example a TRex with four ports can be used by two users. User A can acquire Interface 0/ 1 and User B can acquire Interface 3/4 +* There could be only *one* Console/GUI control (R/W) entity for specific interfaces. So user A with two interfaces could have only one R/W Control session in specific time. By that we can cache the TRex Server interface information in Client Core. +* For one user there could be many read-only clients for getting statistics for same user same interfaces. +* Client should sync with the server to get the state in connection time and cache the server information locally once the state was changed +* In case of crash/exit of the Client it should sync again at connection time +* Client in R/W mode has the ability to get a statistic in real time (with ASYNC ZMQ). It gives the option to have number of ways to look into the statistics (GUI and Console) at the same time. + + +image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width=800, link="images/trex_stateless_multi_user.png"] For more detailed see RPC specification link:trex_rpc_server_spec.html[here] This Architecture provides the following advantages: -* Fast interaction with TRex server. very fast load/start/stop profiles to an interface. +* Fast interaction with TRex server. very fast load/start/stop profiles to an interface (~2000 cycles/sec for load/start/stop profile) * Leveraging Python/Scapy for building a packet/Field engine -* HLTAPI compiler is done in Python. 
- +* HLTAPI compiler complexity is done in Python === Objects @@ -97,7 +104,7 @@ image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600 This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. -==== Tutorial 1: Simple IPv4/UDP packet - Simulator +==== Tutorial: Simple IPv4/UDP packet - Simulator The following example demonstrates the most basic use case using our simulator. @@ -305,7 +312,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt 0030 78 78 78 78 xxxx ---- -==== Tutorial 2: Simple IPv4/UDP packet - TRex +==== Tutorial: Simple IPv4/UDP packet - TRex ===== Run TRex as a server mode @@ -446,7 +453,7 @@ Port Statistics ---- -==== Tutorial 3: Simple IPv4/UDP packet +==== Tutorial: Simple IPv4/UDP packet The following example demonstrates @@ -509,7 +516,7 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 ---- -==== Tutorial 4: Multi Burst mode +==== Tutorial: Multi Burst mode file: `stl/multi_burst_2st_1000pkt.py` @@ -551,7 +558,7 @@ file: `stl/multi_burst_2st_1000pkt.py` image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link="images/stl_tut_4.png"] -==== Tutorial 5: Loops +==== Tutorial: Loops of streams file: `stl/burst_3st_loop_x_times.py` @@ -592,7 +599,7 @@ file: `stl/burst_3st_loop_x_times.py` <1> go back to S0 but limit it to 2 loops -==== Tutorial 6: IMIX with UDP packets directional +==== Tutorial: IMIX with UDP packets directional file: `stl/imix.py` @@ -662,7 +669,7 @@ file: `stl/imix.py` <3> We didn't explain this yet. but this is a Field Engine program to change fields inside the packets -==== Tutorial 7: Field Engine, Syn attack +==== Tutorial: Field Engine, Syn attack The following example demonstrates changing packet fields. The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. 
There is a plan to add LuaJIT to get 100% flexiable in the cost of performance. @@ -741,7 +748,7 @@ pkt,Client IPv4,Client Port |================= -==== Tutorial 8: Field Engine, Tuple Generator +==== Tutorial: Field Engine, Tuple Generator The following example demonstrates creating multiply flow from the same packet template. The TupleGenerator instructions are used to create two stream variables with IP, port @@ -790,7 +797,7 @@ pkt,Client IPv4,Client Port * Number of flows is limited to 129020 (2*65535-1025) * The stream variable size should match the size of the FlowVarWr instruction -==== Tutorial 9: Field Engine, write to a bit-field packet +==== Tutorial: Field Engine, write to a bit-field packet The following example demonstrates a way to write a stream variable to a bit field packet variables. In this example MPLS label field will be changed. @@ -836,7 +843,7 @@ file: `stl/udp_1pkt_mpls_vm.py` <3> Second MPLS header should be changed -==== Tutorial 10: Field Engine, Random packet size +==== Tutorial: Field Engine, Random packet size The following example demonstrates a way to to change packet size to be a random size. The way to do it is: @@ -888,7 +895,7 @@ file: `stl/udp_rand_len_9k.py` <4> fix udp.len -==== Tutorial 11: New Scapy header +==== Tutorial: New Scapy header The following example demonstrates a way to use a header the is not supported by Scapy. In this case this is VXLAN @@ -945,7 +952,7 @@ class STLS1(object): For more information how to define headers see Scapy link:http://www.secdev.org/projects/scapy/doc/build_dissect.html[here] -==== Tutorial 12: Field Engine, Many clients +==== Tutorial: Field Engine, Many clients The following example demonstrates a way to generate traffic from many clients with different IP/MAC to one server. The following figure demonstrate what e want to achieve @@ -1020,7 +1027,7 @@ class STLS1(object): <2> Write the stream variable mac_src with `offset_fixup` of 2. 
beacuse we write it with offset -==== Tutorial 12: Field Engine, Split to core +==== Tutorial: Field Engine, Split to core The following example demonstrates a way to split generated traffic to a number of threads. Using this feature, there is a way to specify by each field to split the traffic to threads. @@ -1144,8 +1151,165 @@ pkt, thread-0 ip_src,thread-1 ip_src * When the number of packets in a burst is smaller than the number of threads only one thread will do the work. * In case there is stream with burst of *1* packet, only the first DP thread will do the work. +==== Tutorial: Field Engine, Split to core with Burst + +The following example demonstrates a way to split generated traffic to a number of threads in the case that we are using Burst stream. +In both cases the number of packets would be split into threads. +Using this feature, The Field engine will be split too. + +===== Without Split + +In this example: + +* Number of threads are two +* Split is not configured + + +[source,python] +---- +# no split +class STLS1(object): + """ attack 48.0.0.1 at port 80 + """ + + def __init__ (self): + self.max_pkt_size_l3 =9*1024; + + def create_stream (self): + + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") + + vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", <1> + min_value="16.0.0.0", + max_value="18.0.0.254", + size=4, op="inc"), + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), <2> + + STLVmFixIpv4(offset = "IP"), # fix checksum + ] + ) + + pkt = STLPktBuilder(pkt = base_pkt, + vm = vm) + + return STLStream(packet = pkt, + mode = STLTXSingleBurst(total_pkts = 20)) <3> + +---- +<1> Stream variable +<2> write it to IPv4.src +<3> burst of 20 packets + +.Variable per thread +[format="csv",cols="1^,3^,3^", options="header",width="40%"] +|================= +pkt, thread-0 ip_src,thread-1 ip_src + 1 , 16.0.0.1 , 16.0.0.1 + 2 , 16.0.0.2 , 16.0.0.2 + 3 , 16.0.0.3 , 16.0.0.3 + 4 , 16.0.0.4 , 16.0.0.4 + 5 , 16.0.0.5 , 16.0.0.5 + 6 , 16.0.0.6, 
16.0.0.6 + 7 , 16.0.0.7, 16.0.0.7 + 8 , 16.0.0.8, 16.0.0.8 + 9 , 16.0.0.9, 16.0.0.9 + 10 , 16.0.0.10, 16.0.0.10 +|================= + +*The results:* + +* Total packets are 20 as expected, 10 generated by each thread +* Field engine is the same for both threads + + +===== With Split + +[source,python] +---- +# no split +class STLS1(object): + """ attack 48.0.0.1 at port 80 + """ + + def __init__ (self): + self.max_pkt_size_l3 =9*1024; + + def create_stream (self): + + base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") + + vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + min_value="16.0.0.0", + max_value="18.0.0.254", + size=4, op="inc"), + + STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), + + STLVmFixIpv4(offset = "IP"), # fix checksum + ] + ,split_by_field = "ip_src" <1> + + ) + + pkt = STLPktBuilder(pkt = base_pkt, + vm = vm) + + return STLStream(packet = pkt, + mode = STLTXSingleBurst(total_pkts = 20)) <2> + +---- +<1> Split is added by `ip_src` stream variable +<2> burst of 20 packets + + +.Variable per thread +[format="csv",cols="1^,3^,3^", options="header",width="40%"] +|================= +pkt, thread-0 ip_src,thread-1 ip_src + 1 , 16.0.0.1 , 17.0.0.128 + 2 , 16.0.0.2 , 17.0.0.129 + 3 , 16.0.0.3 , 17.0.0.130 + 4 , 16.0.0.4 , 17.0.0.131 + 5 , 16.0.0.5 , 17.0.0.132 + 6 , 16.0.0.6, 17.0.0.133 + 7 , 16.0.0.7, 17.0.0.134 + 8 , 16.0.0.8, 17.0.0.135 + 9 , 16.0.0.9, 17.0.0.136 + 10 , 16.0.0.10, 17.0.0.137 +|================= + +*The results:* + +* Total packets are 20 as expected, 10 generated by each thread +* Field engine is *not* the same for both threads. + + +==== Tutorial: Field Engine, Null stream + +The following example demonstrates a way create a Stream with no packets. The use cases is to use the Null stream inter stream gap (ISG) and then go to a new stream. +using this you can create loops like this: + +image::images/stl_null_stream.png[title="Null Stream",align="left",width=600, link="images/stl_null_stream.png"] + +1. 
S1 - send_burst of packets, go to stream NULL +2. NULL - wait ISG time - go to S1 + +Null stream is with configured with + +1. mode: burst +2. number of packets: 0 + + +==== Tutorial: Field Engine, Barrier stream (Split) - [TODO] + +image::images/stl_barrier.png[title="Barrier Stream",align="left",width=600, link="images/stl_barrier.png"] + +In some cases there is a need to split the streams to thread in a way that specific stream will continue only after all the threads pass the same path. +In the above figure we would like to that stream S3 will start on all the thread after S2 was finished by all the threads + -==== Tutorial 13: Pcap file to *one* stream +==== Tutorial: Pcap file to *one* stream There is a way to load *one* packet data into a stream. There is an assumption that this pcap. only the first packet from this pcap is taken. @@ -1177,7 +1341,7 @@ file: `stl/udp_1pkt_pcap_relative_path.py` ---- <1> packet is taken from pcap file relative to *profile* file location -==== Tutorial 14: Pcap file to many streams +==== Tutorial: Pcap file to many streams The following example demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. @@ -1320,7 +1484,7 @@ $./stl-sim -f stl/pcap.py --yaml <4> self_start is disabled for all the streams except the first one -==== Tutorial 15: Pcap file to many streams and Field Engine +==== Tutorial: Pcap file to many streams and Field Engine The following example demonstrates a way to load pcap file to many stream and attach to each stream a Field Engine program. 
For example change the IP.src of all the streams to a random number @@ -1401,7 +1565,7 @@ pkt, IPv4 , flow |================= -==== Tutorial 16: Source and Destination MAC address +==== Tutorial: Source and Destination MAC address Each TRex port has a source MAC configure and destination MAC (DUT) configured in /etc/trex_cfg.yaml By default those MAC (source and destination) is taken @@ -1432,7 +1596,7 @@ file: `stl/udp_1pkt_1mac_override.py` ---- <1> Don't take TRex port src interface MAC -==== Tutorial 17: Teredo tunnel (IPv6 over IPv4) +==== Tutorial: Teredo tunnel (IPv6 over IPv4) The following example demonstrates creating IPv6 packet inside IPv4 packet and create a range of IPs @@ -1467,7 +1631,7 @@ file: `stl/udp_1pkt_ipv6_in_ipv4.py` <3> Write stream tuple.port variable into the second UDP header -==== Tutorial 18: Mask instruction +==== Tutorial: Mask instruction The STLVmWrMaskFlowVar is a handy command. The pseudocode is a folow @@ -1574,6 +1738,140 @@ value |================= +==== Tutorial: Advance traffic profile - platform [TODO] + + +===== Direction + +To make the traffic profile more usable, the traffic profile support per direction/interface. + +[source,python] +---- +def create_streams (self, direction = 0,**args): + if direction = 0: + rate =100 <1> + else: + rate =200 + return [STLHltStream(tcp_src_port_mode = 'decrement', + tcp_src_port_count = 10, + tcp_src_port = 1234, + tcp_dst_port_mode = 'increment', + tcp_dst_port_count = 10, + tcp_dst_port = 1234, + name = 'test_tcp_ranges', + direction = direction, + rate_pps = rate, + ), + ] +---- +<1> Different rate base on direction + +[source,bash] +---- +$start -f ex1.py -a +---- + +If you have 4 interfaces + +interfaces 0/2 is direction 0 +interfaces 1/3 is direction 1 + +So rate will be changed accordingly. 
+ +===== Per Interface + +In this case there is a different profile base on interface ID + +[source,python] +---- + +def create_streams (self, direction = 0, **args): + + port_id = args.get('port_id') + if port_id==None: + port_id=0; + + if port_id == 0: + return [STLHltStream(tcp_src_port_mode = 'decrement', + tcp_src_port_count = 10, + tcp_src_port = 1234, + tcp_dst_port_mode = 'increment', + tcp_dst_port_count = 10, + tcp_dst_port = 1234, + name = 'test_tcp_ranges', + direction = direction, + rate_pps = rate, + ), + ] + + if port_id == 1: + return STLHltStream( + #enable_auto_detect_instrumentation = '1', # not supported yet + ip_dst_addr = '192.168.1.3', + ip_dst_count = '1', + ip_dst_mode = 'increment', + ip_dst_step = '0.0.0.1', + ip_src_addr = '192.168.0.3', + ip_src_count = '1', + ip_src_mode = 'increment', + ip_src_step = '0.0.0.1', + l3_imix1_ratio = 7, + l3_imix1_size = 70, + l3_imix2_ratio = 4, + l3_imix2_size = 570, + l3_imix3_ratio = 1, + l3_imix3_size = 1518, + l3_protocol = 'ipv4', + length_mode = 'imix', + #mac_dst_mode = 'discovery', # not supported yet + mac_src = '00.00.c0.a8.00.03', + mac_src2 = '00.00.c0.a8.01.03', + pkts_per_burst = '200000', + rate_percent = '0.4', + transmit_mode = 'continuous', + vlan_id = '1', + direction = direction, + ) + + if port_id = 3: + .. 
+---- + +The Console will give the port/direction and will get the right stream in each interface + + +===== Tunable + +[source,python] +---- + +class STLS1(object): + + def __init__ (self): + self.num_clients =30000; # max is 16bit <1> + self.fsize =64 + + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether(src="00:00:dd:dd:00:01")/IP(src="55.55.1.1",dst="58.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + +---- +<1> Define object args + + +[source,bash] +---- +$start -f ex1.py -t "fsize=1500,num_clients=10000" #<1> +---- +<1> Change the Tunable using -t option + +Once a profile was defined, it is possible to give a tunable from Console and change the default value. +In this example, change the fsize to 1500 bytes + + === Tutorials HLT profile HLTAPI is a Cisco standard API for traffic generation.IXIA and Spirent support this standard. traffic_config API has set of arguments for specifying the packet, how to send it and what field to change while sending it. @@ -1654,9 +1952,46 @@ TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a more profiles and example can be found in `stl/hlt` folder +==== Tutorial: Per stream statistics + +* Per stream statistic is implemented using hardware assist on the X710/XL710 Intel NIC using flow director rules +* With I350/82599 it is implemented in software. + + [TODO IDO] + +1. Works only for IPv4 (with or without VLAN) +2. IPv4 Identification field is changed to a reserve values. +3. All users acquire interfaces is configured with a rule to count this stream id (base on IPV4.id) +4. Client sum the counter per stream (it is sent on ZMQ async channel) +5. 
Number of RX stats are 128 + + +[source,python] +---- + +class STLS1(object): + + def get_streams (self, direction = 0): + return [STLStream(packet = + STLPktBuilder( + pkt ="stl/yaml/udp_64B_no_crc.pcap"), + mode = STLTXCont(pps=10), + rx_stats = STLRxStats(user_id = 7)) <1> + ] + +---- +<1> Configure this stream to be count on all RX ports as user_id=7 + +* TUI should show Tx/Rx stats [TODO] +* Python API to get the info [TODO] + + +==== Tutorial: Per stream latency/Jitter [TODO] + + + === Tutorials Native Python API -==== Tutorial 1: Python API examples are located here: `automation/trex_control_plane/stl/examples` Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib` @@ -1672,7 +2007,7 @@ file: `stl_bi_dir_flows.py` def simple_burst (): # create client - c = STLClient() + c = STLClient() # default user is $USER. Can be specified explicitly passed = True try: @@ -1693,7 +2028,7 @@ def simple_burst (): c.connect() # prepare our ports (my machine has 0 <--> 1 with static route) - c.reset(ports = [0, 1]) # it will Acquire port 0,1 + c.reset(ports = [0, 1]) # Acquire port 0,1 for $USER # add both streams to ports c.add_streams(s1, ports = [0]) @@ -1747,8 +2082,6 @@ simple_burst() === Tutorials HLT Python API - - HLT Python API is a layer on top the native layer. it support * Device Control @@ -1882,10 +2215,9 @@ if __name__ == "__main__": <2> import HLT TRex - === Reference -=== Stream +==== Stream ==== Packet @@ -1895,9 +2227,380 @@ if __name__ == "__main__": === Console commands +==== Overview + +The console will use TRex Client API for controling TRex +Some guidelines: + +* Console should not save it own state, it should only cache server state. It assumed there is only one console that has R/W capability so once connected as R/W console (per user/interface) it could read the server state and then cache all the operations. +* There could be many read-only clients for the same user same interface. 
+* Console should sync with server to get the state in connection stage and cache the server information locally +* In case of crash/exit of the Console it should sync again at startup +* Commands will be like bash shell - no order args, many flags +* Ability to show stats in real time. Gives the option to open two Console one for statistics and one for commands ( many read-only clients) + +==== Ports State + +[options="header",cols="^1,3a"] +|================= +| state | meaning +| IDLE | no streams, does not work +| STREAMS | with streams, does not work +| WORK | with streams, works +| PAUSE | with streams, pause +|================= + + +[source,bash] +---- + + IDLE -> (add streams) -> STREAMS (start) -> WORK (stop) -> STREAMS (start) + | WORK (pause) -> PAUSE (resume )--- + | | + | | + ------------------------------------ + +----- + +==== Common Arguments + +This section includes arguments that are common to many commands +In the command they will be marked like this (arg name) + +==== Port mask + +this gives the ability to choose batch of ports + +[source,bash] +---- +$command [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] + + port mask : + [-a] : all ports + [-port 1 2 3] : port 1,2 3 + [-port 0xff] : port by mask 0x1 for port 0 0x3 for port 0 and 1 + [-port clients/servers] : -port clients will choose all the client side ports +---- + +==== Duration + +duration in second or in min or hours + +[source,bash] +---- +$command[-d 100] [-d 10m] [-d 1h] + + duration: + -d 100 : in sec + -d 10m : in min + -d 1h : in hours +---- + + +==== Multiplier + +[source,bash] +---- +$command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] + + multiplier : + + -m 100 : multiply stream file by this factor + -m 10gb : from graph calculate the maximum rate as this bandwidth for all streams( for each port ) + -m 10kpps : from graph calculate the maximum rate as this pps for all streams ( for each port ) + -m 40% : from graph calculate the maximum rate as this precent from total 
port ( for each port ) +---- + + +==== Commands + +===== Connect + +[source,bash] +---- + +$trex-con [--ip $IP] [--server $IP] [--rpc-port $PORT] [--async_port port] + + --rpc-port : change the default server - default 5505 for RPC + + --async_port : for sub/pub ZMQ - default 4505 + + --ip or --server :default 127.0.0.1 the TRex server ip +---- + +This command +* try to connect to server +* send ping command +* sync with all the ports info / streams info state +* read all counters stats for reference + +===== reset + +Reset the server and client to a known state - should not be used in a normal scenario + +[source,bash] +---- +$reset +---- + +- force acuire all the ports +- Stop all traffic on all the ports +- Remove all the streams from all the ports + + +===== port + +Configure port state, autoneg, rate etc + +[source,bash] +---- +$port (port mask) --cfg "auto/10/" + + --cfg string with the configuration name + +---- + + +===== clear + +Clear all port stats counters + +[source,bash] +---- +$clear (port mask) +---- + + +===== stats + +Shows global and port statistic + +[source,bash] +---- +$stats (port mask) [-g] [-p] [-ps] + + -g show only global stats + -p only ports stats + -ps only port status (type/driver/link-up/down/negotion type etc) + +---- + + +===== streams + +Shows the configured streams on each port/ports +Should show from client cache + +[source,bash] +---- +$streams (port mask) [--streams mask] [-f] [--full] [--graph] + + --port mask, e.g --port 1 2 3 4 + --streams mask e.g. 
--streams 1 2 + -f /--full print stream info in a JSON format with all the information + --graph : add the graph in time of each port stream +---- + + +example + +[source,bash] +---- +$streams + +port 0 : imix/a.yaml + + stream id , packet type , length , mode , rate , next + + 0 , ip/tcp , 64 , continues , 100KPPS , none + + 1 , ip/udp , 128 , burst , 200KPPS , none + + 2 , ip/udp , 1500 , multi-burst , 100KPPS , none + + + +port 1 : imix/a.yaml + + + 0 , ip/tcp , 64 , continues , 100KPPS , none + + 1 , ip/udp , 128 , burst , 200KPPS , none + + 2 , ip/udp , 1500 , multi-burst , 100KPPS , none + +---- + + +show only port 1 and 2 + +[source,bash] +---- +$streams --port 1 2 + + .. + .. +---- + +[source,bash] +---- +$streams --port 0 --streams 0 -f + + + show the full info on stream 0 and port 0, print in JSON format + +---- + + +===== start + +* work on a set of ports +* remove all streams +* load new streams +* start traffic with specific multiplier +* limit the traffic to a specific duration +* port state should be stopped, in case of --force stop the port +* in case one of the port is not stop don't start any port +* all ports should be in state IDLE or STREAMS + +[source,bash] +---- +$start [--force] (port mask) [-f stl/imix.yaml] [-db ab] (duration) (multiplier) + + + stream to load: + -f stl/imix.yaml : load from local disk the streams file + --db stream that was loaded to db + + force: + --force stop ports if they are active + +---- + +examples + + +[source,bash] +---- +$start -a -f stl/imix.yaml -m 10gb +---- +start this profile on all all ports maximum bandwidth is 10gb + + +[source,bash] +---- +$start -port 1 2 -f stl/imix.yaml -m 100 +---- +start this profile on port 1,2 multiply by 100 + + +[NOTE] +===================================== + in case of start command without args, try to remember the last args given and reprint them +===================================== + +===== stop + +* work on a set of ports +* change the mode of the port to stopped +* do 
not remove the streams +* in case port state is already stopped don't do anything +* all ports should be in state WORK + + +[source,bash] +---- +$stop (port mask) + + See ports command explanation from the start + +---- + + +===== pause + +* work on a set of ports +* move a wokring set of ports to a state of pause +* all ports should be in state WORK + + + +[source,bash] +---- +$pause (port mask) + + see ports command explanation from start + +---- + + +===== resume + +* work on a set of ports +* move a wokring set of port to a state of resume +* all ports should be in state PAUSE + + + +[source,bash] +---- +$resume (port mask) + + see ports command explanation from start + +---- + + +===== restart + +* restart the work on the loaded streams +* same as start without the -f /--db switch +* all ports should be in state STREAMS + +[source,bash] +---- +$restart (port mask) (duration) (multiplier) + + see ports command explanation from start + +---- + +===== update + +* all ports should be in state WORK + + +[source,bash] +---- +>update (port mask) (multiplier) +---- +Update the bandwidth multiplier for a mask of ports + + +[NOTE] +===================================== + Here we could add the ability to disable/enable specific stream, load new stream dynamically etc. 
+===================================== + + +===== tui + +shows the stats in a textual window (like top) + +[source,bash] +---- +$tui +---- + +enter to a mode of Stats and present 3 type of windows +* global/port stats/version/connected etc +* per port +* per port streams info + + +get keyboard + q - quit the gui window + c - clear all counters + + === Appendix + ==== HLT supported Arguments diff --git a/images/Thumbs.db b/images/Thumbs.db index 175b366f..9fb5649d 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/stl_barrier.png b/images/stl_barrier.png new file mode 100644 index 00000000..a454e51b Binary files /dev/null and b/images/stl_barrier.png differ diff --git a/images/stl_null_stream.png b/images/stl_null_stream.png new file mode 100644 index 00000000..8c415965 Binary files /dev/null and b/images/stl_null_stream.png differ diff --git a/images/trex_2_stateless.png b/images/trex_2_stateless.png index 01787f99..5208fe49 100644 Binary files a/images/trex_2_stateless.png and b/images/trex_2_stateless.png differ diff --git a/images/trex_stateless_multi_user.png b/images/trex_stateless_multi_user.png new file mode 100644 index 00000000..132b4250 Binary files /dev/null and b/images/trex_stateless_multi_user.png differ diff --git a/visio_drawings/trex_2.0_stateless.vsd b/visio_drawings/trex_2.0_stateless.vsd index 4ae6420d..cfebc367 100755 Binary files a/visio_drawings/trex_2.0_stateless.vsd and b/visio_drawings/trex_2.0_stateless.vsd differ -- cgit 1.2.3-korg From 9de81123201e66eddc82b8b70c7a4e458225211f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 18:13:14 +0200 Subject: v.194 --- release_notes.asciidoc | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index f052a72e..84eaf608 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,29 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.94 == + +* Fix 
Python API stop/sync issue. Now TX counters are synced in case of stop API +* Improve performance of Python API, ~2000 cycles/sec of load/start/stop +* Add per stream Tx/Rx statistics for XL710/X710 NICS work in flow-director hardware + +[source,python] +---- +class STLS1(object): + + def get_streams (self, direction = 0): + return [STLStream(packet = + STLPktBuilder( + pkt ="stl/yaml/udp_64B_no_crc.pcap"), + mode = STLTXCont(pps=10), + rx_stats = STLRxStats(user_id = 7)) <1> + ] +---- +<1> Configure this stream to be count on all RX ports as user_id=7 + +* Add HTLAPI full example (examples `examples/hlt_udp_simple.py`) + + == Release 1.93 == * Support port attribute API and Console command. See `$portattr -a --prom` -- cgit 1.2.3-korg From a486195ac9cd4f2d579f02c97801cc51131486ef Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 18:14:47 +0200 Subject: v.194-minor --- release_notes.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 84eaf608..f1c7b092 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,7 +20,7 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 1.94 == +== Release 1.94 [not released] == * Fix Python API stop/sync issue. 
Now TX counters are synced in case of stop API * Improve performance of Python API, ~2000 cycles/sec of load/start/stop -- cgit 1.2.3-korg From c323210b8eaf2168c6729ec835ee192169fece2c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 18:19:25 +0200 Subject: minor rx-stats --- draft_trex_stateless.asciidoc | 74 ++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 90bf72c0..f1d56f89 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1872,6 +1872,44 @@ Once a profile was defined, it is possible to give a tunable from Console and ch In this example, change the fsize to 1500 bytes +==== Tutorial: Per stream statistics + +* Per stream statistic is implemented using hardware assist on the X710/XL710 Intel NIC using flow director rules +* With I350/82599 it is implemented in software. + + [TODO IDO] + +1. Works only for IPv4 (with or without VLAN) +2. IPv4 Identification field is changed to a reserve values. +3. All users acquire interfaces is configured with a rule to count this stream id (base on IPV4.id) +4. Client sum the counter per stream (it is sent on ZMQ async channel) +5. Number of RX stats are 128 + + +[source,python] +---- + +class STLS1(object): + + def get_streams (self, direction = 0): + return [STLStream(packet = + STLPktBuilder( + pkt ="stl/yaml/udp_64B_no_crc.pcap"), + mode = STLTXCont(pps=10), + rx_stats = STLRxStats(user_id = 7)) <1> + ] + +---- +<1> Configure this stream to be count on all RX ports as user_id=7 + +* TUI should show Tx/Rx stats [TODO] +* Python API to get the info [TODO] + + +==== Tutorial: Per stream latency/Jitter [TODO] + + + === Tutorials HLT profile HLTAPI is a Cisco standard API for traffic generation.IXIA and Spirent support this standard. traffic_config API has set of arguments for specifying the packet, how to send it and what field to change while sending it. 
@@ -1952,42 +1990,6 @@ TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a more profiles and example can be found in `stl/hlt` folder -==== Tutorial: Per stream statistics - -* Per stream statistic is implemented using hardware assist on the X710/XL710 Intel NIC using flow director rules -* With I350/82599 it is implemented in software. - - [TODO IDO] - -1. Works only for IPv4 (with or without VLAN) -2. IPv4 Identification field is changed to a reserve values. -3. All users acquire interfaces is configured with a rule to count this stream id (base on IPV4.id) -4. Client sum the counter per stream (it is sent on ZMQ async channel) -5. Number of RX stats are 128 - - -[source,python] ----- - -class STLS1(object): - - def get_streams (self, direction = 0): - return [STLStream(packet = - STLPktBuilder( - pkt ="stl/yaml/udp_64B_no_crc.pcap"), - mode = STLTXCont(pps=10), - rx_stats = STLRxStats(user_id = 7)) <1> - ] - ----- -<1> Configure this stream to be count on all RX ports as user_id=7 - -* TUI should show Tx/Rx stats [TODO] -* Python API to get the info [TODO] - - -==== Tutorial: Per stream latency/Jitter [TODO] - === Tutorials Native Python API -- cgit 1.2.3-korg From cd5d5064bb144924d2d47db8fdc6c4f6aa9260d3 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 3 Mar 2016 18:50:17 +0200 Subject: minor change order --- draft_trex_stateless.asciidoc | 325 ++++++++++++++++++++++++------------------ 1 file changed, 186 insertions(+), 139 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index f1d56f89..baf4006c 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -103,9 +103,191 @@ image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600 === Tutorials This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. 
+ +==== Tutorial: Simple IPv4/UDP packet - TRex + +*Goal* : send simple UDP packet from all the ports + +===== Understand the traffic profile + +file: `stl/udp_1pkt_simple.py` + +[source,python] +---- +from trex_stl_lib.api import * + +class STLS1(object): + + def create_stream (self): + + return STLStream( + packet = + STLPktBuilder( + pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025)/(10*'x') <1> + ), + mode = STLTXCont()) <2> + + + def get_streams (self, direction = 0): + # create 1 stream + return [ self.create_stream() ] + + +# dynamic load - used for trex console or simulator +def register(): <3> + return STLS1() +---- +<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'.See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] +<2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) +<3> Each Traffic profile module should have a `register` function + + +===== Run TRex as a server mode + +First run trex in interactive mode + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- + +===== Connect with Console + +From the same machine in a different terminal connect to trex (you can do it from remote machine with -s [ip]) + +from console you can run this + +[source,bash] +---- +$trex-console + +Connecting to RPC server on localhost:4501 [SUCCESS] +connecting to publisher server on localhost:4500 [SUCCESS] +Acquiring ports [0, 1, 2, 3]: [SUCCESS] + +125.69 [ms] + +TRex > start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> + +Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] +Attaching 1 streams to port(s) [0, 1, 2, 3]: [SUCCESS] +Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] + +# pause the traffic on all port +>pause -a #<2> + +# resume the traffic on all port +>resume -a #<3> + +# stop traffic on all port +>stop -a #<4> + +# show dynamic statistic +>tui +---- +<1> Start the traffic on all the ports in 10mbps. 
you can try with 14MPPS +<2> Pause the traffic +<3> Resume +<4> Stop on all the ports + + +To look into the streams using `streams -a` + +.Streams +[source,bash] +---- + +TRex > streams -a +Port 0: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 1: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 2: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +Port 3: + + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + +TRex > +---- + + +to get help on a command run `command --help` + +to look into general statistics + +[source,bash] +---- +Global Statistics + +Connection : localhost, Port 4501 +Version : v1.93, UUID: N/A +Cpu Util : 0.2% + : +Total Tx L2 : 40.01 Mb/sec +Total Tx L1 : 52.51 Mb/sec +Total Rx : 40.01 Mb/sec +Total Pps : 78.14 Kpkt/sec + : +Drop Rate : 0.00 b/sec +Queue Full : 0 pkts + +Port Statistics + + port | 0 | 1 | + -------------------------------------------------------- + owner | hhaim | hhaim | + state | ACTIVE | ACTIVE | + -- | | | + Tx bps L2 | 10.00 Mbps | 10.00 Mbps | + Tx bps L1 | 13.13 Mbps | 13.13 Mbps | + Tx pps | 19.54 Kpps | 19.54 Kpps | + Line Util. 
| 0.13 % | 0.13 % | + --- | | | + Rx bps | 10.00 Mbps | 10.00 Mbps | + Rx pps | 19.54 Kpps | 19.54 Kpps | + ---- | | | + opackets | 1725794 | 1725794 | + ipackets | 1725794 | 1725794 | + obytes | 110450816 | 110450816 | + ibytes | 110450816 | 110450816 | + tx-bytes | 110.45 MB | 110.45 MB | + rx-bytes | 110.45 MB | 110.45 MB | + tx-pkts | 1.73 Mpkts | 1.73 Mpkts | + rx-pkts | 1.73 Mpkts | 1.73 Mpkts | + ----- | | | + oerrors | 0 | 0 | + ierrors | 0 | 0 | + + status: / + + browse: 'q' - quit, 'g' - dashboard, '0-3' - port display + dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, +---- + + +INFO: The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination + ==== Tutorial: Simple IPv4/UDP packet - Simulator +*Goal* : Learn how to use the TRex Stateless simulator,important for more complex use cases + The following example demonstrates the most basic use case using our simulator. 
file: `stl/udp_1pkt_simple.py` @@ -312,148 +494,12 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt 0030 78 78 78 78 xxxx ---- -==== Tutorial: Simple IPv4/UDP packet - TRex - -===== Run TRex as a server mode - -First run trex in interactive mode - -[source,bash] ----- -$sudo ./t-rex-64 -i ----- - -===== Connect with Console - -From the same machine in a different terminal connect to to trex (you can do it from remote machine with -s [ip] -from console you can run this +==== Tutorial: Multi stream support -[source,bash] ----- -$trex-console +*Goal* : Send more than one stream -Connecting to RPC server on localhost:4501 [SUCCESS] -connecting to publisher server on localhost:4500 [SUCCESS] -Acquiring ports [0, 1, 2, 3]: [SUCCESS] - -125.69 [ms] - -TRex > start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> - -Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] -Attaching 1 streams to port(s) [0, 1, 2, 3]: [SUCCESS] -Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] - -# pause the traffic on all port ->pause -a #<2> - -# resume the traffic on all port ->resume -a #<3> - -# stop traffic on all port ->stop -a #<4> - -# show dynamic statistic ->tui ----- -<1> Start the traffic on all the ports in 10mbps. 
you can try with 14MPPS -<2> Pause the traffic -<3> Resume -<4> Stop on all the ports - - -To look into the streams using `streams -a` - -.Streams -[source,bash] ----- - -TRex > streams -a -Port 0: - - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 - -Port 1: - - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 - -Port 2: - - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 - -Port 3: - - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 - -TRex > ----- - - -to get help on a command run `command --help` - -to look into general statistics - -[source,bash] ----- -Global Statistics - -Connection : localhost, Port 4501 -Version : v1.93, UUID: N/A -Cpu Util : 0.2% - : -Total Tx L2 : 40.01 Mb/sec -Total Tx L1 : 52.51 Mb/sec -Total Rx : 40.01 Mb/sec -Total Pps : 78.14 Kpkt/sec - : -Drop Rate : 0.00 b/sec -Queue Full : 0 pkts - -Port Statistics - - port | 0 | 1 | - -------------------------------------------------------- - owner | hhaim | hhaim | - state | ACTIVE | ACTIVE | - -- | | | - Tx bps L2 | 10.00 Mbps | 10.00 Mbps | - Tx bps L1 | 13.13 Mbps | 13.13 Mbps | - Tx pps | 19.54 Kpps | 19.54 Kpps | - Line Util. 
| 0.13 % | 0.13 % | - --- | | | - Rx bps | 10.00 Mbps | 10.00 Mbps | - Rx pps | 19.54 Kpps | 19.54 Kpps | - ---- | | | - opackets | 1725794 | 1725794 | - ipackets | 1725794 | 1725794 | - obytes | 110450816 | 110450816 | - ibytes | 110450816 | 110450816 | - tx-bytes | 110.45 MB | 110.45 MB | - rx-bytes | 110.45 MB | 110.45 MB | - tx-pkts | 1.73 Mpkts | 1.73 Mpkts | - rx-pkts | 1.73 Mpkts | 1.73 Mpkts | - ----- | | | - oerrors | 0 | 0 | - ierrors | 0 | 0 | - - status: / - - browse: 'q' - quit, 'g' - dashboard, '0-3' - port display - dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, ----- - - -==== Tutorial: Simple IPv4/UDP packet The following example demonstrates @@ -518,8 +564,9 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 ==== Tutorial: Multi Burst mode -file: `stl/multi_burst_2st_1000pkt.py` +*Goal* : Learn Multi burst +file: `stl/multi_burst_2st_1000pkt.py` [source,python] ---- -- cgit 1.2.3-korg From 3866e35dc86d700c19001c46e56081ce6ae6c4cc Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 6 Mar 2016 11:15:28 +0200 Subject: add virtual NICs and passthrough in ESXi config info --- images/passthrough_adding.png | Bin 0 -> 3678 bytes images/passthrough_marking.png | Bin 0 -> 21375 bytes images/vSwitch_loopback.png | Bin 0 -> 3578 bytes images/vSwitch_main.png | Bin 0 -> 4880 bytes images/vSwitch_networks.png | Bin 0 -> 2400 bytes trex_book.asciidoc | 24 +++++++++++++++++++++++- 6 files changed, 23 insertions(+), 1 deletion(-) create mode 100755 images/passthrough_adding.png create mode 100755 images/passthrough_marking.png create mode 100755 images/vSwitch_loopback.png create mode 100755 images/vSwitch_main.png create mode 100755 images/vSwitch_networks.png diff --git a/images/passthrough_adding.png b/images/passthrough_adding.png new file mode 100755 index 00000000..4b2c3167 Binary files /dev/null and b/images/passthrough_adding.png differ diff --git a/images/passthrough_marking.png b/images/passthrough_marking.png new file mode 
100755 index 00000000..f3d4ac7b Binary files /dev/null and b/images/passthrough_marking.png differ diff --git a/images/vSwitch_loopback.png b/images/vSwitch_loopback.png new file mode 100755 index 00000000..6becc4d7 Binary files /dev/null and b/images/vSwitch_loopback.png differ diff --git a/images/vSwitch_main.png b/images/vSwitch_main.png new file mode 100755 index 00000000..37e0b08e Binary files /dev/null and b/images/vSwitch_main.png differ diff --git a/images/vSwitch_networks.png b/images/vSwitch_networks.png new file mode 100755 index 00000000..eca79142 Binary files /dev/null and b/images/vSwitch_networks.png differ diff --git a/trex_book.asciidoc b/trex_book.asciidoc index f0ef132b..3c7b30fd 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -441,7 +441,21 @@ zmq publisher at: tcp://*:4500 WARNING: If you don't see rx packets, revisit your MAC address configuration. -==== Running TRex for the first time with virtual NICs + vSwitch +==== Running TRex for the first time with ESXi: + +* Virtual NICs can be used to bridge between TRex and non-supported NICs or get some basic impression/testing. Bandwidth is limited by vSwitch, has ipv6 issues. + +1. Click on the host machine, enter Configuration -> Networking. + +a. One of the NICs should be connected to the main vSwitch network to get "outside" connection, for the TRex client and ssh: + +image:images/vSwitch_main.png[title="vSwitch_main"] + +b. Other NICs that are used for TRex traffic should be in distinguish vSwitch: + +image:images/vSwitch_loopback.png[title="vSwitch_loopback"] + +2. 
Right click on guest machine -> Edit settings -> Ensure the NICs are set to their networks: + +image:images/vSwitch_networks.png[title="vSwitch_networks"] + [NOTE] ===================================================================== @@ -452,6 +466,14 @@ sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 vSwitch can't know where to "route" the packet, it supposed to be fixed once TRex supports ARP ===================================================================== +* Pass-through is the way to use directly the NICs from host machine inside the VM. Has no limitations except the NIC/hardware itself. The only difference via bare-metal OS is seldom spikes of latency (~10ms). Passthrough settings can't be saved to OVA. + +1. Click on the host machine, enter Configuration -> Advanced settings -> Edit. Mark the wanted NICs. Reboot the ESXi to apply. + +image:images/passthrough_marking.png[title="passthrough_marking"] + +2. Right click on guest machine -> Edit settings -> Add -> *PCI device* -> Choose the NICs one by one. 
+ +image:images/passthrough_adding.png[title="passthrough_adding"] + ==== Running TRex for the first time with router You can follow this presentation link:trex_config_guide.html[first time TRex configuration] -- cgit 1.2.3-korg From da6c8601da77ce858d9abeb55b838bb6ba985d3b Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Mon, 7 Mar 2016 10:57:59 +0200 Subject: Addition to flow stats documentation --- draft_trex_stateless.asciidoc | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index baf4006c..8a4eda47 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1921,16 +1921,22 @@ In this example, change the fsize to 1500 bytes ==== Tutorial: Per stream statistics -* Per stream statistic is implemented using hardware assist on the X710/XL710 Intel NIC using flow director rules -* With I350/82599 it is implemented in software. +* Per stream statistics is implemented using hardware assist when possible (X710/XL710 Intel NICs flow director rules for example). +* With other NICs (Intel I350, 82599) it is implemented in software. +* Implementation works as follows: +1. User chooses 32 bit packet group id (pg_id). +1. IPv4 Identification field of the stream is changed to a value with in a reserved range (0xff00 to 0xffff). Notice that if a stream for which +no statistics is needed has IPv4 Identification in the reserved range, it is changed (left bit becomes 0). +1. In the software implementation, hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. +In the hardware implementation, HW rules are inserted to count packets from relevant streams. +1. Summed up statistics (per stream, per port) are sent using ZMQ async channel to clients. - [TODO IDO] +* Limitations: -1. Works only for IPv4 (with or without VLAN) -2. IPv4 Identification field is changed to a reserve values. -3. 
All users acquire interfaces is configured with a rule to count this stream id (base on IPV4.id) -4. Client sum the counter per stream (it is sent on ZMQ async channel) -5. Number of RX stats are 128 +1. Currently, the feature supports only two packet types: +a. IPv4 over ethernet +b. IPv4 with one vlan tag +2. Number of concurrent streams you can get statistics for is 128. [source,python] @@ -1943,11 +1949,11 @@ class STLS1(object): STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap"), mode = STLTXCont(pps=10), - rx_stats = STLRxStats(user_id = 7)) <1> + rx_stats = STLRxStats(pg_id = 7)) <1> ] ---- -<1> Configure this stream to be count on all RX ports as user_id=7 +<1> Configure this stream to be counted on all RX ports as packet group id 7 * TUI should show Tx/Rx stats [TODO] * Python API to get the info [TODO] -- cgit 1.2.3-korg From c934e8e95ed4b7467ab09275426acbcc322a72fc Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 7 Mar 2016 13:32:01 +0200 Subject: minor --- draft_trex_stateless.asciidoc | 4 +-- release_notes.asciidoc | 3 +- waf1.css | 78 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 3 deletions(-) create mode 100644 waf1.css diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index baf4006c..84118c51 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1,5 +1,5 @@ -TRex -==== +TRex Stateless support +====================== :author: hhaim :email: :revnumber: 2.0 diff --git a/release_notes.asciidoc b/release_notes.asciidoc index f1c7b092..60a4fe63 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,7 +20,7 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 1.94 [not released] == +== Release 1.94 == * Fix Python API stop/sync issue. 
Now TX counters are synced in case of stop API * Improve performance of Python API, ~2000 cycles/sec of load/start/stop @@ -41,6 +41,7 @@ class STLS1(object): <1> Configure this stream to be count on all RX ports as user_id=7 * Add HTLAPI full example (examples `examples/hlt_udp_simple.py`) +* Add user manual draft for Stateless functionality link:draft_trex_stateless.html[here] == Release 1.93 == diff --git a/waf1.css b/waf1.css new file mode 100644 index 00000000..7255051a --- /dev/null +++ b/waf1.css @@ -0,0 +1,78 @@ +div.tableblock > table { + border: 1px solid gray; +} + +div#header-pic { + background-image: url("images/bg4.jpg"); + background-repeat: no-repeat; + background-color: #cccccc; +} + + + +div#header h1 { + background: url('images/trex_logo_64_64.png') no-repeat left center; + padding-left: 80px; + line-height: 80px; + height: 80px; +} + +div.title, caption.title { + text-align: center; + margin-bottom: 0.2em; +} + +div.tableblock > table th { + background-color: #F4F4F4; +} + +h1, h2, h3, h4, h5, h6, span#author, div.title, caption.title, div.admonitionblock .icon, div#toctitle, div.sidebar-title, div.image-title { + color: #333; +} + +h1, h2, h3, h4, h5, h6 { + font-family: Georgia, 'Times New Roman', serif +} + +h1 { + font-size: 24px; +} + +h2,h3,h4,h5,h6 { + color: #f14e32; + padding-top: 0.5em; + font-size: 24px; + line-height: 44px; + font-weight: bold; + margin-top: 20px; + border-bottom: 2px solid silver + float: left; +} + +h3,h4,h5,h6 { + color: #0388a6; + font-size: 18px +} + + +p { + font-family: sans-serif; + text-indent: 0px; + font-size: 14px; + line-height: 22px; + margin-top: 0.5em; + margin-bottom: 0.5em; +} + +a:visited { + color: #404040; +} + + +body, div.sectionbody, div#toctitle { + font-family: Lato, proxima-nova, 'Helvetica Neue', Arial; + font-size : 13px; + color: #404040; +} + + -- cgit 1.2.3-korg From d906e56b2be5340218cef207a41d82a8416d2fe3 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 7 Mar 2016 14:51:34 +0200 
Subject: add vsd --- images/stl_streams_example.vsd | Bin 0 -> 164864 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/stl_streams_example.vsd diff --git a/images/stl_streams_example.vsd b/images/stl_streams_example.vsd new file mode 100644 index 00000000..fd60231d Binary files /dev/null and b/images/stl_streams_example.vsd differ -- cgit 1.2.3-korg From d61cdeb1b147ae5e6722e295cfd19c26c67e0717 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 8 Mar 2016 15:32:48 +0200 Subject: add example --- draft_trex_stateless.asciidoc | 867 ++++++++++++++++++++++++++--------------- images/stl_inter.png | Bin 0 -> 18165 bytes images/stl_streams_example.vsd | Bin 164864 -> 183296 bytes 3 files changed, 556 insertions(+), 311 deletions(-) create mode 100644 images/stl_inter.png diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 84118c51..42eadaee 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1,12 +1,14 @@ TRex Stateless support ====================== -:author: hhaim -:email: +:author: TRex team +:email: trex.tgen@gmail.com :revnumber: 2.0 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex +:github_stl_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl +:github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples :toclevels: 6 @@ -54,6 +56,33 @@ image::images/stl_streams_example.png[title="Streams example",align="left",width ** RIP/BGP/ISIS/SPF +=== IXIA IXExplorer vs TRex + +TRex has limited functionality compared to IXIA, but has some advantages. 
The following table summarized the difference + +.TRex vs IXExplorer +[cols="1^,3^,3^,5^", options="header"] +|================= +| Feature | IXExplorer |TRex | Description +| Line rate | Yes |Almost ~15MPPS/core| +| Multi stream | 255 | [green]*Unlimited* | +| Packet build flexibility | Limited | [green]*Scapy- Ulimited* | GRE/VXLAN/NSH is supported by TRex and can be extended to future protocols +| Packet Field engine | limited | [green]*Unlimited* | +| Tx Mode | Continues/Burst/Multi burst | Continues/Burst/Multi burst| +| ARP Emulation | Yes | Not yet - workaround | +| Automation | TCL/Python wrapper to TCL | [green]*native Python/Scapy* | +| Automation speed sec| 30sec | [green]*1msec* | test of load/start/stop/get counters +| HLTAPI | Full support 2000 page of documentation | Limited 20 page Of documentation| +| Per Stream statistic | 255 streams with 4 global mask | 128 rules for XL710/X710 hardware and software for 82599 I350| in case of XL710/X710 thre are some restrictions for the packet type to be recived +| Latency Jitter | Yes | Yes | +| Multi user support | Yes | Yes | +| GUI | very good | not ready yet, packet build is scapy based, not the same as IXIA | +| Cisco pyATS support | Yes | Yes Python 2.7, Python 64bits, WIP to port it to Python 3.0| +| Emulation | Yes | Not yet | +| Port Ids | Base on IXIA numebrs | Depends on PCI enumeration +|================= + + === RPC Architecture To support interactive mode, JSON-RPC2 thread added to the TRex Control Plane core. @@ -100,17 +129,40 @@ image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600 ** *Rate*: in Packet per second or bandwidth ** *Action*: The next stream to go after this stream is finished. 
Valid for Burst/Continues mode -=== Tutorials +==== TRex package folders + +[cols="1,5", options="header"] +|================= +| Location | Description +| / | t-rex-64/dpdk_set_ports/stl-sim +| /stl | Stateless py example profiles +| /stl/yaml | Stateless YAML profiles +| /stl/htl | Stateless HTL profiles +| /ko | Kernel modules for DPDK +| /external_libs | Python external libs used by server/clients +| /exp | Golden pcap file for unit-tests +| /cfg | Examples of config files +| /cap2 | Stateful profiles +| /avl | Stateful profiles - SFR profile +| /automation | Python client/server code for both Stateful and Stateless +| /automation/regression | Regression for Stateless and Stateful +| /automation/config | Regression setups config files +| /automation/trex_control_plane/stl | Stateless lib and Console +| /automation/trex_control_plane/stl/trex_stl_lib | Stateless lib +| /automation/trex_control_plane/stl/examples | Stateless Examples +|================= + +=== Basic Tutorials This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. 
==== Tutorial: Simple IPv4/UDP packet - TRex -*Goal* : send simple UDP packet from all the ports +Goal:: send simple UDP packet from all the ports -===== Understand the traffic profile +Traffic profile:: -file: `stl/udp_1pkt_simple.py` +file: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -129,21 +181,22 @@ class STLS1(object): mode = STLTXCont()) <2> - def get_streams (self, direction = 0): + def get_streams (self, direction = 0): <3> # create 1 stream return [ self.create_stream() ] # dynamic load - used for trex console or simulator -def register(): <3> +def register(): <4> return STLS1() ---- <1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'.See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] <2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) -<3> Each Traffic profile module should have a `register` function +<3> get_streams function is mandatory +<4> Each Traffic profile module should have a `register` function -===== Run TRex as a server mode +Run TRex as a server mode:: First run trex in interactive mode @@ -152,7 +205,7 @@ First run trex in interactive mode $sudo ./t-rex-64 -i ---- -===== Connect with Console +Connect with Console:: From the same machine in a different terminal connect to trex (you can do it from remote machine with -s [ip]) @@ -168,7 +221,7 @@ Acquiring ports [0, 1, 2, 3]: [SUCCESS] 125.69 [ms] -TRex > start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> +trex> start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] Attaching 1 streams to port(s) [0, 1, 2, 3]: [SUCCESS] @@ -197,33 +250,30 @@ To look into the streams using `streams -a` .Streams [source,bash] ---- - -TRex > streams -a +trex> streams -a Port 0: - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous 
| 1.00 pps | -1 + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 Port 1: - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 Port 2: - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 Port 3: - ID | packet type | length | mode | rate | next stream - ----------------------------------------------------------------------------------------------- - 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 - -TRex > + ID | packet type | length | mode | rate | next stream + ----------------------------------------------------------------------------------- + 1 | Ethernet:IP:UDP:Raw | 56 | Continuous | 1.00 pps | -1 ---- @@ -233,6 +283,7 @@ to look into general statistics [source,bash] ---- +TRex >tui Global Statistics Connection : localhost, Port 4501 @@ -280,17 +331,400 @@ Port Statistics dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, ---- +[NOTE] +===================================================================== +The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. 
if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination +===================================================================== + +==== Tutorial: Connect from remote server + +Goal:: Client will connect from remote machine to TRex server + +TRex server is up:: + +Make sure TRex server is running, if not run trex in interactive mode + +[source,bash] +---- +$sudo ./t-rex-64 -i +---- + +Connect with Console:: + +From remote machine you can run this with `-s` flag + +[source,bash] +---- +$trex-console -s csi-kiwi-02 +---- + +if the default python is not 64bit/2.7.x you can change the *PYTHON* environment variable using + +.tcsh +[source,bash] +---- +setenv PYTHON /bin/python #tcsh +---- + +.bash +[source,bash] +---- +extern PYTHON=/bin/mypython #bash +---- + +[NOTE] +===================================================================== +Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is supported +===================================================================== + +==== Tutorial: Source and Destination MAC address + +Goal:: Change source/destination MAC addrees + +Each TRex port has a source and destination MAC (DUT) configured in /etc/trex_cfg.yaml. +The source MAC is not necessarily the hardware MAC address configured in eeprom. +By default those MAC (source and destination) is taken. 
+In case a user configures a source or destination MAC explicitly this MAC will take precedence + + +.MAC addrees +[format="csv",cols="2^,2^,2^", options="header",width="40%"] +|================= +Scapy , Source MAC,Destination MAC +Ether() , trex_cfg,trex_cfg +Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg +Ether(dst="00:bb:12:34:56:01"),trex_cfg,"00:bb:12:34:56:01" +|================= + +For example + +file: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] + + +[source,python] +---- + def create_stream (self): + + base_pkt = Ether(src="00:bb:12:34:56:01")/ <1> + IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025) +---- +<1> Don't take TRex port src interface MAC instead replace it with 00:bb:12:34:56:01 + +[IMPORTANT] +===================================== +TRex ports will receive a packet only when the packet will have a destination MAC of the `/etc/trex_cfg.yaml`. To configure the port to be promiscuous and get all the packets on the line you can configure it from API or from Console with `portattr -a --prom` +===================================== + +To show the port mode + +[source,bash] +---- +trex>portattr -a --prom #<1> +trex>stats --ps +Port Status + + port | 0 | 1 | + --------------------------------------------------------------- +driver | rte_ixgbe_pmd | rte_ixgbe_pmd | +maximum | 10 Gb/s | 10 Gb/s | +status | IDLE | IDLE | +promiscuous | off | off | #<2> + -- | | | +HW src mac | 90:e2:ba:36:33:c0 | 90:e2:ba:36:33:c1 | +SW src mac | 00:00:00:01:00:00 | 00:00:00:01:00:00 | +SW dst mac | 00:00:00:01:00:00 | 00:00:00:01:00:00 | + --- | | | +PCI Address | 0000:03:00.0 | 0000:03:00.1 | +NUMA Node | 0 | 0 | +---- +<1> Configure all the ports to be promiscuous +<2> Check port promiscuous mode + +==== Tutorial: Python automation + +Goal:: Simple automation test using Python from remote or local machine + +Python API examples are located here: `automation/trex_control_plane/stl/examples` +Python API library 
is located here: `automation/trex_control_plane/stl/trex_stl_lib` +The Console is using the python API library to interact with TRex server and the protocol is JSON-RPC2 over ZMQ + +file: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] + + +[source,python] +---- +import stl_path +from trex_stl_lib.api import * <1> + +import time +import json + +# simple packet creation <2> +def create_pkt (size, direction): + + ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"}, + 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}} + + if (direction == 0): + src = ip_range['src'] + dst = ip_range['dst'] + else: + src = ip_range['dst'] + dst = ip_range['src'] + + vm = [ + # src <4> + STLVmFlowVar(name="src", + min_value=src['start'], + max_value=src['end'], + size=4,op="inc"), + STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"), + + # dst + STLVmFlowVar(name="dst", + min_value=dst['start'], + max_value=dst['end'], + size=4,op="inc"), + STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"), + + # checksum + STLVmFixIpv4(offset = "IP") + ] + + + base = Ether()/IP()/UDP() + pad = max(0, len(base)) * 'x' + + return STLPktBuilder(pkt = base/pad, + vm = vm) + + <3> +def simple_burst (): + + # create client + c = STLClient() + # username/server can be changed those are the default + # username = common.get_current_user(), + # server = "localhost" + # STLClient(server = "my_server",username ="trex_client") for example + passed = True + + try: + # turn this on for some information + #c.set_verbose("high") + + # create two streams + s1 = STLStream(packet = create_pkt(200, 0), + mode = STLTXCont(pps = 100)) + + # second stream with a phase of 1ms (inter stream gap) + s2 = STLStream(packet = create_pkt(200, 1), + isg = 1000, + mode = STLTXCont(pps = 100)) + + + # connect to server + c.connect() + + # prepare our ports (my machine has 0 <--> 1 with static route) + c.reset(ports = [0, 1]) # Acquire port 0,1 for $USER + + # add both streams to ports + 
c.add_streams(s1, ports = [0]) + c.add_streams(s2, ports = [1]) + + # clear the stats before injecting + c.clear_stats() + + # choose rate and start traffic for 10 seconds on 5 mpps + print "Running 5 Mpps on ports 0, 1 for 10 seconds..." + c.start(ports = [0, 1], mult = "5mpps", duration = 10) <1> + + # block until done + c.wait_on_traffic(ports = [0, 1]) + + # read the stats after the test + stats = c.get_stats() + + print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True) + print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True) + + lost_a = stats[0]["opackets"] - stats[1]["ipackets"] + lost_b = stats[1]["opackets"] - stats[0]["ipackets"] + + print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a) + print "packets lost from 1 --> 0: {0} pkts".format(lost_b) + + if (lost_a == 0) and (lost_b == 0): + passed = True + else: + passed = False + + except STLError as e: + passed = False + print e + + finally: + c.disconnect() + + if passed: + print "\nTest has passed :-)\n" + else: + print "\nTest has failed :-(\n" + + +# run the tests +simple_burst() +---- +<1> import trex Stateless library +<2> create packet per direction using Scapy +<3> Connect/load/start to TRex +<4> This is something more advanced will be explained later + + +==== Tutorials HLT Python API + +HLT Python API is a layer on top the native layer. 
it support + +* Device Control +** connect +** cleanup_session +** device_info +** info +* Interface +** interface_config +** interface_stats +* Traffic +** traffic_config - not all arguments are supported +** traffic_control +** traffic_stats + + +file: link:{github_stl_examples_path}/hlt_udp_simple.py[hlt_udp_simple.py] + + +[source,python] +---- + +import sys +import argparse +import stl_path +from trex_stl_lib.api import * <1> +from trex_stl_lib.trex_stl_hltapi import * <2> + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(usage=""" + Connect to TRex and send burst of packets + + examples + + hlt_udp_simple.py -s 9000 -d 30 + + hlt_udp_simple.py -s 9000 -d 30 -rate_percent 10 + + hlt_udp_simple.py -s 300 -d 30 -rate_pps 5000000 + + hlt_udp_simple.py -s 800 -d 30 -rate_bps 500000000 --debug + + then run the simulator on the output + ./stl-sim -f example.yaml -o a.pcap ==> a.pcap include the packet + + """, + description="Example for TRex HLTAPI", + epilog=" based on hhaim's stl_run_udp_simple example"); + + parser.add_argument("--ip", + dest="ip", + help='Remote trex ip', + default="127.0.0.1", + type = str) + + parser.add_argument("-s", "--frame-size", + dest="frame_size", + help='L2 frame size in bytes without FCS', + default=60, + type = int,) + + parser.add_argument('-d','--duration', + dest='duration', + help='duration in second ', + default=10, + type = int,) + + parser.add_argument('--rate-pps', + dest='rate_pps', + help='speed in pps', + default="100") + + parser.add_argument('--src', + dest='src_mac', + help='src MAC', + default='00:50:56:b9:de:75') + + parser.add_argument('--dst', + dest='dst_mac', + help='dst MAC', + default='00:50:56:b9:34:f3') + + args = parser.parse_args(); + + hltapi = CTRexHltApi() + print 'Connecting to TRex' + res = hltapi.connect(device = args.ip, port_list = [0, 1], reset = True, break_locks = True) + check_res(res) + ports = res['port_handle'] + if len(ports) < 2: + error('Should have at least 2 ports for 
this test') + print 'Connected, acquired ports: %s' % ports + + print 'Creating traffic' + + res = hltapi.traffic_config(mode = 'create', bidirectional = True, + port_handle = ports[0], port_handle2 = ports[1], + frame_size = args.frame_size, + mac_src = args.src_mac, mac_dst = args.dst_mac, + mac_src2 = args.dst_mac, mac_dst2 = args.src_mac, + l3_protocol = 'ipv4', + ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254, + ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254, + l4_protocol = 'udp', + udp_dst_port = 12, udp_src_port = 1025, + stream_id = 1, # temporary workaround, add_stream does not return stream_id + rate_pps = args.rate_pps, + ) + check_res(res) + + print 'Starting traffic' + res = hltapi.traffic_control(action = 'run', port_handle = ports[:2]) + check_res(res) + wait_with_progress(args.duration) + + print 'Stopping traffic' + res = hltapi.traffic_control(action = 'stop', port_handle = ports[:2]) + check_res(res) + + res = hltapi.traffic_stats(mode = 'aggregate', port_handle = ports[:2]) + check_res(res) + print_brief_stats(res) + + res = hltapi.cleanup_session(port_handle = 'all') + check_res(res) + + print 'Done' +---- +<1> import Native TRex API +<2> import HLT TRex -INFO: The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination ==== Tutorial: Simple IPv4/UDP packet - Simulator -*Goal* : Learn how to use the TRex Stateless simulator,important for more complex use cases +Goal:: Demonstrates the most basic use case using our simulator -The following example demonstrates the most basic use case using our simulator. 
-file: `stl/udp_1pkt_simple.py` +file: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -394,7 +828,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --json }, "next_stream_id": -1, "packet": { - "binary": "AAAAAQAAAAAAAgAACABFAAAmAAEAAEAROsUQAAABMAAAAQQBAAwAEmFheHh4eHh4eHh4eA==", + "binary": "AAAAAQAAAAAAAgAACABFAAAmAA", "meta": "" }, "rx_stats": { @@ -445,7 +879,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --yaml pps: 1.0 type: continuous packet: - binary: AAAAAQAAAAAAAgAACABFAAAmAAEAAEAROsUQAAABMAAAAQQBAAwAEmFheHh4eHh4eHh4eA== + binary: AAAAAQAAAAAAAgAACABFAAAmAAEAAEARO meta: '' rx_stats: enabled: false @@ -495,11 +929,81 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt ---- +=== Traffic profile Tutorials + +==== Tutorial: Simple Interleave streams + +Goal:: Demonstrate number of interleave streams + +The following example demonstrates 3 streams with different rates (pps=10,20,40) and different start time ISG (0,25msec,50msec) + +file:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] + +[source,python] +---- + def create_stream (self): + + # create a base packet and pad it to size + size = self.fsize - 4; # no FCS + base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) <1> + base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) + base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) + pad = max(0, size - len(base_pkt)) * 'x' + + + return STLProfile( [ STLStream( isg = 0.0, # 25msec + packet = STLPktBuilder(pkt = base_pkt/pad), + mode = STLTXCont( pps = 10), <2> + ), + + STLStream( isg = 25000.0, + packet = STLPktBuilder(pkt = base_pkt1/pad), + mode = STLTXCont( pps = 20), <3> + ), + + STLStream( isg = 50000.0, + packet = STLPktBuilder(pkt = base_pkt2/pad), + mode = STLTXCont( pps = 40) <4> + + ) + ]).get_streams() +---- +<1> Define template packets using scapy +<2> Define streams with rate of 10 +<3> Define streams with rate of 20 +<4> Define streams 
with rate of 40 + + +The output:: + +image::images/stl_inter.png[title="Interleave streams",align="left",width=600, link="images/stl_inter.png"] + +Discussion:: + +1. stream #1 has a packet each 100msec +2. stream #2 has a packet each 50msec +3. stream #3 has a packet each 25msec +4. Stream #2 start after 25msec relative to stream #1 +5. Stream #3 start after 50msec relative to stream #1 + +You can use the simulator to look into the details + +[source,bash] +---- +$./stl-sim -f stl/simple_3pkt.py -o b.pcap -l 200 +---- + +or run it from Console + +[source,bash] +---- +trex>start -f stl/simple_3pkt.py -m 10mbps -a +---- -==== Tutorial: Multi stream support -*Goal* : Send more than one stream +==== Tutorial: Multi stream support +Goal:: profile with stream trigger a stream The following example demonstrates @@ -508,7 +1012,7 @@ The following example demonstrates 3. Stream activate a Stream (self_start=False) -file: `stl/burst_3pkt_60pkt.py` +file: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] [source,python] @@ -566,7 +1070,7 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 *Goal* : Learn Multi burst -file: `stl/multi_burst_2st_1000pkt.py` +file: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] [source,python] ---- @@ -607,7 +1111,7 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link ==== Tutorial: Loops of streams -file: `stl/burst_3st_loop_x_times.py` +file: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_times.py] [source,python] ---- @@ -648,7 +1152,7 @@ file: `stl/burst_3st_loop_x_times.py` ==== Tutorial: IMIX with UDP packets directional -file: `stl/imix.py` +file: link:{github_stl_path}/imix.py[stl/imix.py] [source,python] ---- @@ -733,7 +1237,7 @@ for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_o The following example demonstrates creating SYN attack from many src to one server. 
-file: `stl/syn_attack.py` +file: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] [source,python] ---- @@ -800,7 +1304,7 @@ pkt,Client IPv4,Client Port The following example demonstrates creating multiply flow from the same packet template. The TupleGenerator instructions are used to create two stream variables with IP, port -file: `stl/udp_1pkt_tuple_gen.py` +file: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] [source,python] ---- @@ -856,7 +1360,7 @@ In this example MPLS label field will be changed. 0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1| |==== -file: `stl/udp_1pkt_mpls_vm.py` +file: link:{github_stl_path}/udp_1pkt_mpls_vm.py[stl/udp_1pkt_mpls_vm.py] [source,python] ---- @@ -898,7 +1402,7 @@ The way to do it is: 2. Trim the packet to the size you want 3. Update the packet fields to the new size -file: `stl/udp_rand_len_9k.py` +file: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] [source,python] ---- @@ -948,7 +1452,7 @@ The following example demonstrates a way to use a header the is not supported by In this case this is VXLAN -file: `stl/udp_1pkt_vxlan.py` +file: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] [source,python] @@ -1035,7 +1539,7 @@ To send gratuitous ARP from TRex server side for this server (58.0.0.1) Then traffic can be sent from client side A->C -file: `stl/udp_1pkt_range_clients_split.py` +file: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] [source,python] ---- @@ -1360,7 +1864,7 @@ In the above figure we would like to that stream S3 will start on all the thread There is a way to load *one* packet data into a stream. There is an assumption that this pcap. only the first packet from this pcap is taken. 
-file: `stl/udp_1pkt_pcap.py` +file: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] [source,python] ---- @@ -1374,7 +1878,7 @@ file: `stl/udp_1pkt_pcap.py` <1> packet is taken from pcap file relative to pwd of the script you run -file: `stl/udp_1pkt_pcap_relative_path.py` +file: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] [source,python] @@ -1392,7 +1896,7 @@ file: `stl/udp_1pkt_pcap_relative_path.py` The following example demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. -file: `stl/pcap.py` +file: link:{github_stl_path}/pcap.py[pcap.py] [source,python] ---- @@ -1536,7 +2040,7 @@ $./stl-sim -f stl/pcap.py --yaml The following example demonstrates a way to load pcap file to many stream and attach to each stream a Field Engine program. For example change the IP.src of all the streams to a random number -file: `stl/pcap_with_vm.py` +file: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] [source,python] ---- @@ -1612,42 +2116,11 @@ pkt, IPv4 , flow |================= -==== Tutorial: Source and Destination MAC address - -Each TRex port has a source MAC configure and destination MAC (DUT) configured in /etc/trex_cfg.yaml -By default those MAC (source and destination) is taken -In case a user configures a source or destination MAC explicitly this MAC will override - - -.MAC addrees -[format="csv",cols="2^,2^,2^", options="header",width="40%"] -|================= -Scapy , Source MAC,Destination MAC -Ether() , trex_cfg,trex_cfg -Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg -Ether(dst="00:bb:12:34:56:01"),trex_cfg,"00:bb:12:34:56:01" -|================= - -For example - -file: `stl/udp_1pkt_1mac_override.py` - - -[source,python] ----- - def create_stream (self): - - base_pkt = Ether(src="00:bb:12:34:56:01")/ <1> - IP(src="16.0.0.1",dst="48.0.0.1")/ - UDP(dport=12,sport=1025) ----- -<1> Don't take TRex port src interface MAC - ==== 
Tutorial: Teredo tunnel (IPv6 over IPv4) The following example demonstrates creating IPv6 packet inside IPv4 packet and create a range of IPs -file: `stl/udp_1pkt_ipv6_in_ipv4.py` +file: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] [source,python] ---- @@ -1965,7 +2438,7 @@ Under the hood there is a compiler that converts it to native scapy/field engine The support is limited to [TBD] this argument. -file: `stl/hlt/hlt_udp_inc_dec_len_9k.py` +file: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] [source,python] ---- @@ -2037,233 +2510,6 @@ TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a more profiles and example can be found in `stl/hlt` folder - - -=== Tutorials Native Python API - - -Python API examples are located here: `automation/trex_control_plane/stl/examples` -Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib` - -The Console is using the library to interact with TRex server and protocol is JSON-RPC2 over ZMQ - -file: `stl_bi_dir_flows.py` - - -[source,python] ----- - -def simple_burst (): - - # create client - c = STLClient() # default user is $USER. Can be specified explicitly - passed = True - - try: - # turn this on for some information - #c.set_verbose("high") - - # create two streams - s1 = STLStream(packet = create_pkt(200, 0), - mode = STLTXCont(pps = 100)) - - # second stream with a phase of 1ms (inter stream gap) - s2 = STLStream(packet = create_pkt(200, 1), - isg = 1000, - mode = STLTXCont(pps = 100)) - - - # connect to server - c.connect() - - # prepare our ports (my machine has 0 <--> 1 with static route) - c.reset(ports = [0, 1]) # Acquire port 0,1 for $USER - - # add both streams to ports - c.add_streams(s1, ports = [0]) - c.add_streams(s2, ports = [1]) - - # clear the stats before injecting - c.clear_stats() - - # choose rate and start traffic for 10 seconds on 5 mpps - print "Running 5 Mpps on ports 0, 1 for 10 seconds..." 
- c.start(ports = [0, 1], mult = "5mpps", duration = 10) <1> - - # block until done - c.wait_on_traffic(ports = [0, 1]) - - # read the stats after the test - stats = c.get_stats() - - print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True) - print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True) - - lost_a = stats[0]["opackets"] - stats[1]["ipackets"] - lost_b = stats[1]["opackets"] - stats[0]["ipackets"] - - print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a) - print "packets lost from 1 --> 0: {0} pkts".format(lost_b) - - if (lost_a == 0) and (lost_b == 0): - passed = True - else: - passed = False - - except STLError as e: - passed = False - print e - - finally: - c.disconnect() - - if passed: - print "\nTest has passed :-)\n" - else: - print "\nTest has failed :-(\n" - - -# run the tests -simple_burst() ----- -<1> Start can work on mask of ports - - -=== Tutorials HLT Python API - -HLT Python API is a layer on top the native layer. 
it support - -* Device Control -** connect -** cleanup_session -** device_info -** info -* Interface -** interface_config -** interface_stats -* Traffic -** traffic_config - not all arguments are supported -** traffic_control -** traffic_stats - - -file: `hlt_udp_simple.py` - - -[source,python] ----- - -import sys -import argparse -import stl_path -from trex_stl_lib.api import * <1> -from trex_stl_lib.trex_stl_hltapi import * <2> - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(usage=""" - Connect to TRex and send burst of packets - - examples - - hlt_udp_simple.py -s 9000 -d 30 - - hlt_udp_simple.py -s 9000 -d 30 -rate_percent 10 - - hlt_udp_simple.py -s 300 -d 30 -rate_pps 5000000 - - hlt_udp_simple.py -s 800 -d 30 -rate_bps 500000000 --debug - - then run the simulator on the output - ./stl-sim -f example.yaml -o a.pcap ==> a.pcap include the packet - - """, - description="Example for TRex HLTAPI", - epilog=" based on hhaim's stl_run_udp_simple example"); - - parser.add_argument("--ip", - dest="ip", - help='Remote trex ip', - default="127.0.0.1", - type = str) - - parser.add_argument("-s", "--frame-size", - dest="frame_size", - help='L2 frame size in bytes without FCS', - default=60, - type = int,) - - parser.add_argument('-d','--duration', - dest='duration', - help='duration in second ', - default=10, - type = int,) - - parser.add_argument('--rate-pps', - dest='rate_pps', - help='speed in pps', - default="100") - - parser.add_argument('--src', - dest='src_mac', - help='src MAC', - default='00:50:56:b9:de:75') - - parser.add_argument('--dst', - dest='dst_mac', - help='dst MAC', - default='00:50:56:b9:34:f3') - - args = parser.parse_args(); - - hltapi = CTRexHltApi() - print 'Connecting to TRex' - res = hltapi.connect(device = args.ip, port_list = [0, 1], reset = True, break_locks = True) - check_res(res) - ports = res['port_handle'] - if len(ports) < 2: - error('Should have at least 2 ports for this test') - print 'Connected, acquired ports: %s' 
% ports - - print 'Creating traffic' - - res = hltapi.traffic_config(mode = 'create', bidirectional = True, - port_handle = ports[0], port_handle2 = ports[1], - frame_size = args.frame_size, - mac_src = args.src_mac, mac_dst = args.dst_mac, - mac_src2 = args.dst_mac, mac_dst2 = args.src_mac, - l3_protocol = 'ipv4', - ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254, - ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254, - l4_protocol = 'udp', - udp_dst_port = 12, udp_src_port = 1025, - stream_id = 1, # temporary workaround, add_stream does not return stream_id - rate_pps = args.rate_pps, - ) - check_res(res) - - print 'Starting traffic' - res = hltapi.traffic_control(action = 'run', port_handle = ports[:2]) - check_res(res) - wait_with_progress(args.duration) - - print 'Stopping traffic' - res = hltapi.traffic_control(action = 'stop', port_handle = ports[:2]) - check_res(res) - - res = hltapi.traffic_stats(mode = 'aggregate', port_handle = ports[:2]) - check_res(res) - print_brief_stats(res) - - res = hltapi.cleanup_session(port_handle = 'all') - check_res(res) - - print 'Done' ----- -<1> import Native TRex API -<2> import HLT TRex - - === Reference ==== Stream @@ -2649,7 +2895,6 @@ get keyboard === Appendix - ==== HLT supported Arguments diff --git a/images/stl_inter.png b/images/stl_inter.png new file mode 100644 index 00000000..0aeed52b Binary files /dev/null and b/images/stl_inter.png differ diff --git a/images/stl_streams_example.vsd b/images/stl_streams_example.vsd index fd60231d..38ff194b 100644 Binary files a/images/stl_streams_example.vsd and b/images/stl_streams_example.vsd differ -- cgit 1.2.3-korg From d5eeea85e4c623b81f034a67e0e5a8720755ce19 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 14:30:23 +0200 Subject: minor fixes --- draft_trex_stateless.asciidoc | 459 +++++++++++++++++++++++++++--------------- waf.css | 4 + 2 files changed, 306 insertions(+), 157 deletions(-) diff --git 
a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 42eadaee..15fa7136 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -18,8 +18,8 @@ TRex Stateless support * High scale - line rate 14MPPS per core, linear scale with number of cores * Support 1/10/25/40/100 Gb/sec interfaces -* Interface can configured with multi traffic profiles -* Profile can support multi streams. Scale to 10K streams in parallel +* Interface can be configured with multi traffic profiles +* Traffic Profile can support multi streams. Scale to 10K streams in parallel * Each Stream ** Packet template - ability to build any packet using Scapy (e.g. MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) ** Field engine program @@ -66,17 +66,17 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo | Feature | IXExplorer |TRex | Description | Line rate | Yes |Almost ~15MPPS/core| | Multi stream | 255 | [green]*Unlimited* | -| Packet build flexibility | Limited | [green]*Scapy- Ulimited* | GRE/VXLAN/NSH is supported by TRex and can be extended to future protocols +| Packet build flexibility | Limited | [green]*Scapy- Ulimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols | Packet Field engine | limited | [green]*Unlimited* | | Tx Mode | Continues/Burst/Multi burst | Continues/Burst/Multi burst| | ARP Emulation | Yes | Not yet - workaround | | Automation | TCL/Python wrapper to TCL | [green]*native Python/Scapy* | | Automation speed sec| 30sec | [green]*1msec* | test of load/start/stop/get counters -| HLTAPI | Full support 2000 page of documentation | Limited 20 page Of documentation| -| Per Stream statistic | 255 streams with 4 global mask | 128 rules for XL710/X710 hardware and software for 82599 I350| in case of XL710/X710 thre are some restrictions for the packet type to be recived +| HLTAPI | Full support. 
2000 pages of documentation | Limited 20 page of documentation| +| Per Stream statistic | 255 streams with 4 global mask | 128 rules for XL710/X710 hardware and software impl for 82599/I350/X550| in case of XL710/X710 there are some restrictions for the packet type | Latency Jitter | Yes | Yes | | Multi user support | Yes | Yes | -| GUI | very good | not ready yet, packet build is scapy based, not the same as IXIA | +| GUI | very good | WIP, packet build is scapy based. Not the same as IXIA | | Cisco pyATS support | Yes | Yes Python 2.7, Python 64bits, WIP to port it to Python 3.0| | Emulation | Yes | Not yet | | Port Ids | Base on IXIA numebrs | Depends on PCI enumeration @@ -87,7 +87,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo To support interactive mode, JSON-RPC2 thread added to the TRex Control Plane core. -The following diagram illustrates the RPC server component's +The following diagram illustrates the RPC server/client components image::images/trex_2_stateless.png[title="RPC Server Position",align="left",width=800, link="images/trex_2_stateless.png"] @@ -97,12 +97,11 @@ image::images/trex_2_stateless.png[title="RPC Server Position",align="left",widt * Python is the first Client to implement the Python automation API * Console utilizes the Python API to implement a user interface to TRex * Number of users can control one TRex server in parallel as long as they control different Interfaces. TRex Interface can be acquired by a user. For example a TRex with four ports can be used by two users. User A can acquire Interface 0/ 1 and User B can acquire Interface 3/4 -* There could be only *one* Console/GUI control (R/W) entity for specific interfaces. So user A with two interfaces could have only one R/W Control session in specific time. By that we can cache the TRex Server interface information in Client Core. -* For one user there could be many read-only clients for getting statistics for same user same interfaces. 
+* There could be only *one* control Console/GUI (R/W) entity for a specific user. User A with two interfaces could have only one R/W Control session in specific time. By that we can cache the TRex Server interface information in the Client. +* For one user there could be many read-only clients for getting statistics. * Client should sync with the server to get the state in connection time and cache the server information locally once the state was changed -* In case of crash/exit of the Client it should sync again at connection time -* Client in R/W mode has the ability to get a statistic in real time (with ASYNC ZMQ). It gives the option to have number of ways to look into the statistics (GUI and Console) at the same time. - +* In case of crash/exit of the Client it should sync again at connection time. +* Client has the ability to get a statistic in real time (with ASYNC ZMQ). It gives the option to have number of ways to look into the statistics (GUI and Console) at the same time. image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width=800, link="images/trex_stateless_multi_user.png"] @@ -114,28 +113,29 @@ This Architecture provides the following advantages: * Leveraging Python/Scapy for building a packet/Field engine * HLTAPI compiler complexity is done in Python -=== Objects +=== TRex Entities image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600, link="images/stateless_objects.png"] -* *TRex*: Each TRex instance, includes a number of interfaces +* *TRex*: Each TRex instance includes a number of interfaces * *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) -* *Traffic profile*: Each traffic profile includes a number of streams +* *Traffic profile*: Each traffic profile includes a number of streams. This is the basic building block of activation. It is possible to add/remove to an interface a profile while other profile already exists. 
A profile can be looked as a "program" with dependency between streams. It is not possible to change a profile while it is running except changing the rates. * *Stream*: Each stream includes ** *Packet*: Packet template up to 9K bytes ** *Field Engine*: which field to change, do we want to change packet size ** *Mode*: how to send the packet. Continues/Burst/Multi Burst ** *Rx Stats* Which Statstistic to collect for each stream -** *Rate*: in Packet per second or bandwidth +** *Rate*: Specified in Packet Per Second (pps) or bandwidth (bps) ** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continues mode + ==== TRex package folders -[cols="1,5", options="header"] -|================= +[cols="1,5", options="header",width="80%"] +|============================= | Location | Description | / | t-rex-64/dpdk_set_ports/stl-sim -| /stl | Stateless py example profiles +| /stl | Stateless native (py) profiles | /stl/yaml | Stateless YAML profiles | /stl/htl | Stateless HTL profiles | /ko | Kernel modules for DPDK @@ -158,11 +158,13 @@ This tutorial will walk you through basic but complete TRex Stateless use cases ==== Tutorial: Simple IPv4/UDP packet - TRex -Goal:: send simple UDP packet from all the ports +*Goal*:: send a simple UDP packet from all the ports -Traffic profile:: +*Traffic profile*:: -file: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] +Traffic profile (TP) is a way to define *how* to generate the traffic. It defines the traffic templates the rate the mode and which fields in the packet to change. The following example defines a profile with one stream. The stream is with IP/UDP packet template with 10 bytes of 'x'(0x78) of payload. 
to get more example how to define packets using scapy see here link:http://www.secdev.org/projects/scapy/doc/[Scapy] + +*file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -190,30 +192,32 @@ class STLS1(object): def register(): <4> return STLS1() ---- -<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'.See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] -<2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) +<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'(0x78) .See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] +<2> Mode is Continues with a rate of 1 pps (default rate is 1 PPS) <3> get_streams function is mandatory <4> Each Traffic profile module should have a `register` function -Run TRex as a server mode:: +*Start TRex as a server*:: + +[NOTE] +===================================================================== +There is no need to install any python packages (including scapy). 
just download the TRex package +===================================================================== -First run trex in interactive mode [source,bash] ---- $sudo ./t-rex-64 -i ---- -Connect with Console:: - -From the same machine in a different terminal connect to trex (you can do it from remote machine with -s [ip]) +*Connect with Console*:: -from console you can run this +From the same machine in a different terminal (either open a new window using `xterm`, or `ssh` again) run the following command [source,bash] ---- -$trex-console +$trex-console #<1> Connecting to RPC server on localhost:4501 [SUCCESS] connecting to publisher server on localhost:4500 [SUCCESS] @@ -221,28 +225,29 @@ Acquiring ports [0, 1, 2, 3]: [SUCCESS] 125.69 [ms] -trex> start -f stl/udp_1pkt_simple.py -m 10mbps -a #<1> +trex>start -f stl/udp_1pkt_simple.py -m 10mbps -a #<2> Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] Attaching 1 streams to port(s) [0, 1, 2, 3]: [SUCCESS] Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] # pause the traffic on all port ->pause -a #<2> +>pause -a #<3> # resume the traffic on all port ->resume -a #<3> +>resume -a #<4> # stop traffic on all port ->stop -a #<4> +>stop -a #<5> # show dynamic statistic >tui ---- -<1> Start the traffic on all the ports in 10mbps. you can try with 14MPPS -<2> Pause the traffic -<3> Resume -<4> Stop on all the ports +<1> Connect to TRex server assume server at local machine +<2> Start the traffic on all the ports in 10mbps. you can try with 14MPPS +<3> Pause the traffic +<4> Resume +<5> Stop on all the ports To look into the streams using `streams -a` @@ -250,7 +255,7 @@ To look into the streams using `streams -a` .Streams [source,bash] ---- -trex> streams -a +trex>streams -a Port 0: ID | packet type | length | mode | rate | next stream @@ -336,11 +341,11 @@ Port Statistics The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. 
if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination ===================================================================== -==== Tutorial: Connect from remote server +==== Tutorial: Connect from a remote server -Goal:: Client will connect from remote machine to TRex server +*Goal*:: Console connect from a remote machine to TRex server -TRex server is up:: +*Check that TRex server is up*:: Make sure TRex server is running, if not run trex in interactive mode @@ -349,14 +354,15 @@ Make sure TRex server is running, if not run trex in interactive mode $sudo ./t-rex-64 -i ---- -Connect with Console:: +*Connect with Console*:: From remote machine you can run this with `-s` flag [source,bash] ---- -$trex-console -s csi-kiwi-02 +$trex-console -s csi-kiwi-02 #<1> ---- +<1> TRex server is csi-kiwi-02 if the default python is not 64bit/2.7.x you can change the *PYTHON* environment variable using @@ -374,12 +380,12 @@ extern PYTHON=/bin/mypython #bash [NOTE] ===================================================================== -Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is supported +Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is supported. Python 3.0 support in WIP ===================================================================== ==== Tutorial: Source and Destination MAC address -Goal:: Change source/destination MAC addrees +*Goal*:: Change source/destination MAC addrees Each TRex port has a source and destination MAC (DUT) configured in /etc/trex_cfg.yaml. The source MAC is not necessarily the hardware MAC address configured in eeprom. 
@@ -388,18 +394,17 @@ In case a user configures a source or destination MAC explicitly this MAC will t .MAC addrees -[format="csv",cols="2^,2^,2^", options="header",width="40%"] +[format="csv",cols="2^,2^,2^", options="header",width="50%"] |================= Scapy , Source MAC,Destination MAC -Ether() , trex_cfg,trex_cfg -Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg -Ether(dst="00:bb:12:34:56:01"),trex_cfg,"00:bb:12:34:56:01" +Ether() , trex_cfg (src),trex_cfg(dst) +Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg(dst) +Ether(dst="00:bb:12:34:56:01"),trex_cfg(src),"00:bb:12:34:56:01" |================= For example -file: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] - +*file*:: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] [source,python] ---- @@ -413,7 +418,7 @@ file: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_overrid [IMPORTANT] ===================================== -TRex ports will receive a packet only when the packet will have a destination MAC of the `/etc/trex_cfg.yaml`. To configure the port to be promiscuous and get all the packets on the line you can configure it from API or from Console with `portattr -a --prom` +TRex ports will receive a packet only when the packet will have a destination MAC of port defined in the `/etc/trex_cfg.yaml`. To configure the port to be promiscuous and get all the packets on the line you can configure it from API or from Console with `portattr -a --prom` ===================================== To show the port mode @@ -443,24 +448,26 @@ NUMA Node | 0 | 0 | ==== Tutorial: Python automation -Goal:: Simple automation test using Python from remote or local machine +*Goal*:: Simple automation test using Python from remote or local machine + +Python API examples are located here: `automation/trex_control_plane/stl/examples`. 
-Python API examples are located here: `automation/trex_control_plane/stl/examples` Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib` + The Console is using the python API library to interact with TRex server and the protocol is JSON-RPC2 over ZMQ -file: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] +*file*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] [source,python] ---- -import stl_path -from trex_stl_lib.api import * <1> +import stl_path <1> +from trex_stl_lib.api import * <2> import time import json -# simple packet creation <2> +# simple packet creation <3> def create_pkt (size, direction): ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"}, @@ -499,7 +506,7 @@ def create_pkt (size, direction): return STLPktBuilder(pkt = base/pad, vm = vm) - <3> + <5> def simple_burst (): # create client @@ -525,10 +532,10 @@ def simple_burst (): # connect to server - c.connect() - + c.connect() <5> + # prepare our ports (my machine has 0 <--> 1 with static route) - c.reset(ports = [0, 1]) # Acquire port 0,1 for $USER + c.reset(ports = [0, 1]) # Acquire port 0,1 for $USER <6> # add both streams to ports c.add_streams(s1, ports = [0]) @@ -539,19 +546,19 @@ def simple_burst (): # choose rate and start traffic for 10 seconds on 5 mpps print "Running 5 Mpps on ports 0, 1 for 10 seconds..." 
- c.start(ports = [0, 1], mult = "5mpps", duration = 10) <1> + c.start(ports = [0, 1], mult = "5mpps", duration = 10) <7> # block until done - c.wait_on_traffic(ports = [0, 1]) + c.wait_on_traffic(ports = [0, 1]) <8> # read the stats after the test - stats = c.get_stats() + stats = c.get_stats() <9> print json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True) print json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True) lost_a = stats[0]["opackets"] - stats[1]["ipackets"] - lost_b = stats[1]["opackets"] - stats[0]["ipackets"] + lost_b = stats[1]["opackets"] - stats[0]["ipackets"] print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a) print "packets lost from 1 --> 0: {0} pkts".format(lost_b) @@ -566,7 +573,7 @@ def simple_burst (): print e finally: - c.disconnect() + c.disconnect() <10> if passed: print "\nTest has passed :-)\n" @@ -577,15 +584,26 @@ def simple_burst (): # run the tests simple_burst() ---- -<1> import trex Stateless library -<2> create packet per direction using Scapy -<3> Connect/load/start to TRex +<1> import the stl_path. you should *fix* the path to point to your stl_trex library path +<2> import trex Stateless library. path should be fixed +<3> create packet per direction using Scapy <4> This is something more advanced will be explained later +<5> Connect to local TRex username , server can be added +<6> Acquire the ports +<7> load the profile and start the traffic +<8> Wait for the traffic to, be finished. There is a polling function so you can test do something while waiting +<9> Get port statistics +<10> Disconnect ==== Tutorials HLT Python API -HLT Python API is a layer on top the native layer. it support +HLT Python API is a layer on top the native layer. It supports the standard Cisco traffic generator API +See more in Cisco/IXIA/Spirent documentation +TRex supported a limited number of HLTAPI arguments and the recommendation is to use the native API due to the flexibility and simplicity. 
+IXIA for example, has a book of ~2000 pages for specifying all the HLTAPI mode of operations. One of the reasons for the 2000 pages is that in the API there is no clear separation between the definition of the template packet, and the fields that need to be changed and the mode of transmission. This creates a bloat of arguments that need to be documented. + +The supported classes are: * Device Control ** connect @@ -601,7 +619,7 @@ HLT Python API is a layer on top the native layer. it support ** traffic_stats -file: link:{github_stl_examples_path}/hlt_udp_simple.py[hlt_udp_simple.py] +*file*:: link:{github_stl_examples_path}/hlt_udp_simple.py[hlt_udp_simple.py] [source,python] @@ -721,10 +739,24 @@ if __name__ == "__main__": ==== Tutorial: Simple IPv4/UDP packet - Simulator -Goal:: Demonstrates the most basic use case using our simulator +*Goal*:: Demonstrates the most basic use case using TRex simulator + + +The simulator is a tool called `stl-sim` that is part of the TRex package. +It is a python script that calls an executable. +The executable should run on the same machine that TRex image run (it won't run on older Linux distributions). + +Using the simulator you can: + +* Test your traffic profiles before running it on TRex. 
+* It can generate the output pcap file +* Simulate number of threads +* Convert from one type of profile to another +* Convert any profile to JSON (API) +let's take this profile -file: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] +*file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -756,8 +788,7 @@ def register(): <2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) <3> Each Traffic profile module should have a `register` function - -Now let try to run it throw TRex simulator limiting the number of packet to 10 +Now let try to run it throw TRex simulator limiting the number of packets to 10 [source,bash] ---- @@ -799,8 +830,9 @@ $ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 ---- -image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width=800, link="images/stl_tut_1.png.png"] +The following figure presents the output pcap file +image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width=800, link="images/stl_tut_1.png.png"] .To look into the JSON command to the server [source,bash] @@ -860,13 +892,11 @@ $./stl-sim -f stl/udp_1pkt_simple.py --json } } ] - ---- For more detailed on Stream definition see RPC specification link:trex_rpc_server_spec.html#_add_stream[here] - -.To look into the YAML profile +.To convert the profile into YAML format [source,bash] ---- $./stl-sim -f stl/udp_1pkt_simple.py --yaml @@ -889,8 +919,8 @@ $./stl-sim -f stl/udp_1pkt_simple.py --yaml split_by_var: '' ---- +To look into the Packet detail try --pkt option (using scapy) -.To look into the Packet detail try --pkt option [source,bash] ---- $./stl-sim -f stl/udp_1pkt_simple.py --pkt @@ -928,16 +958,80 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt 0030 78 78 78 78 xxxx ---- +To convert any profile type to native again use the `--native` option + +.Input YAML format +[source,python] +---- +$more stl/yaml/imix_1pkt.yaml +- name: udp_64B + stream: + 
self_start: True + packet: + pcap: udp_64B_no_crc.pcap # pcap should not include CRC + mode: + type: continuous + pps: 100 +---- + +.Convert to Native +[source,bash] +---- +$./stl-sim -f stl/yaml/imix_1pkt.yaml --native +---- + + +.Output Native +[source,python] +---- +# !!! Auto-generated code !!! +from trex_stl_lib.api import * + +class STLS1(object): + def get_streams(self): + streams = [] + + packet = (Ether(src='00:de:01:0a:01:00', dst='00:50:56:80:0d:28', type=2048) / + IP(src='101.0.0.1', proto=17, dst='102.0.0.1', chksum=28605, len=46, flags=2L, ihl=5L, id=0) / + UDP(dport=2001, sport=2001, len=26, chksum=1176) / + Raw(load='\xde\xad\xbe\xef\x00\x01\x06\x07\x08\x09\x0a\x0b\x00\x9b\xe7\xdb\x82M')) + vm = CTRexScRaw([], split_by_field = '') + stream = STLStream(packet = CScapyTRexPktBuilder(pkt = packet, vm = vm), + name = 'udp_64B', + mac_src_override_by_pkt = 0, + mac_dst_override_mode = 0, + mode = STLTXCont(pps = 100)) + streams.append(stream) + + return streams + +def register(): + return STLS1() +---- + +*Discussion*:: + +The following are the main traffic profiles formats. The native is the preferred one. There is a separation between how the traffic is defined and how to control/activate it. The API/Console/GUI can load a traffic profile and start/stop/get a statistic. Due to this separation it is possible to share traffic profiles. + +.Traffic profiles formats +[cols="1^,1^,10<", options="header",width="80%"] +|================= +| Profile Type | Format | Description +| Native | Python | A native Python like. Have the most flexibility. any format can be converted to native using `stl-sim` using --native option +| HLT | Python | HLT arguments like +| YAML | YAML | It is the common denominator traffic profile. We suggest not to use it by human as it is not possible to compose packet using scapy. it is used to move profile between GUI and Console or API. 
It can be converted to native using the stl-sim using --native switch +|================= + === Traffic profile Tutorials ==== Tutorial: Simple Interleave streams -Goal:: Demonstrate number of interleave streams +*Goal*:: Demonstrate number of interleave streams The following example demonstrates 3 streams with different rates (pps=10,20,40) and different start time ISG (0,25msec,50msec) -file:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] +*file*:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] [source,python] ---- @@ -951,17 +1045,17 @@ file:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] pad = max(0, size - len(base_pkt)) * 'x' - return STLProfile( [ STLStream( isg = 0.0, # 25msec + return STLProfile( [ STLStream( isg = 0.0, packet = STLPktBuilder(pkt = base_pkt/pad), mode = STLTXCont( pps = 10), <2> ), - STLStream( isg = 25000.0, + STLStream( isg = 25000.0, #defined in usec, 25 msec packet = STLPktBuilder(pkt = base_pkt1/pad), mode = STLTXCont( pps = 20), <3> ), - STLStream( isg = 50000.0, + STLStream( isg = 50000.0,#defined in usec, 50 msec packet = STLPktBuilder(pkt = base_pkt2/pad), mode = STLTXCont( pps = 40) <4> @@ -975,35 +1069,35 @@ file:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] The output:: +The folowing figure present the output image::images/stl_inter.png[title="Interleave streams",align="left",width=600, link="images/stl_inter.png"] - + Discussion:: -1. stream #1 has a packet each 100msec -2. stream #2 has a packet each 50msec -3. stream #3 has a packet each 25msec +1. stream #1 schedule a packet each 100msec +2. stream #2 schedule a packet each 50msec +3. stream #3 schedule a packet each 25msec 4. Stream #2 start after 25msec relative to stream #1 5. 
Stream #3 start after 50msec relative to stream #1 -You can use the simulator to look into the details +You can use the simulator to look into the details (pcap file) [source,bash] ---- $./stl-sim -f stl/simple_3pkt.py -o b.pcap -l 200 ---- -or run it from Console +or run it from Console on a TRex [source,bash] ---- trex>start -f stl/simple_3pkt.py -m 10mbps -a ---- +==== Tutorial: Multi burst streams - action next stream -==== Tutorial: Multi stream support - -Goal:: profile with stream trigger a stream +*Goal*:: profile with stream that trigger a stream The following example demonstrates @@ -1011,8 +1105,7 @@ The following example demonstrates 2. Burst of 10 packets 3. Stream activate a Stream (self_start=False) - -file: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] +*file*:: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] [source,python] @@ -1051,26 +1144,27 @@ file: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] <2> S1 with self_start=False. S0 activate it <3> S2 is activate by S1 +To run the simulator run this command + [source,bash] ---- $ ./stl-sim -f stl/stl/burst_3pkt_600pkt.py -o b.pcap ---- -The pcap file has 60 packet. The first 10 packets has src_ip=16.0.0.1. The next 10 packets has src_ip=16.0.0.2. The next 10 packets has src_ip=16.0.0.3 +The pcap file should have 60 packets. The first 10 packets has src_ip=16.0.0.1. The next 20 packets has src_ip=16.0.0.2. 
The next 30 packets has src_ip=16.0.0.3 -This profile can be run from Console using thed command +This profile can be run from Console using this command [source,bash] ---- TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 ---- - ==== Tutorial: Multi Burst mode -*Goal* : Learn Multi burst +*Goal* : Learn Multi burst transmit mode -file: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] +*file*:: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] [source,python] ---- @@ -1103,7 +1197,7 @@ file: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000 ---- <1> Stream S0 wait 10 usec(isg) and send burst of 10 packet in 10 PPS rate -<2> Multi burst of 5 Burst of 4 packet with inter burst gap of one second +<2> Multi burst of 5 bursts of 4 packets with a inter burst gap of one second image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link="images/stl_tut_4.png"] @@ -1111,7 +1205,9 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link ==== Tutorial: Loops of streams -file: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_times.py] +*Goal* : Demonstrate a limited loop of streams + +*file*:: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_times.py] [source,python] ---- @@ -1152,7 +1248,11 @@ file: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_time ==== Tutorial: IMIX with UDP packets directional -file: link:{github_stl_path}/imix.py[stl/imix.py] +*Goal* : Demonstrate how to create IMIX + +This profile has 3 streams each with different size packet. 
The rate is different for each stream/size see link:https://en.wikipedia.org/wiki/Internet_Mix[here] + +*file*:: link:{github_stl_path}/imix.py[stl/imix.py] [source,python] ---- @@ -1219,11 +1319,10 @@ file: link:{github_stl_path}/imix.py[stl/imix.py] <2> Even port id has direction==0 and odd has direction==1 <3> We didn't explain this yet. but this is a Field Engine program to change fields inside the packets - ==== Tutorial: Field Engine, Syn attack The following example demonstrates changing packet fields. -The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to get 100% flexiable in the cost of performance. +The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to be more flexiable in the cost of performance. The FE can allocate stream variable in Stream context. Write a stream variable to a packet offset, change packet size etc. *Some examples for what can be done:* @@ -1237,7 +1336,7 @@ for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_o The following example demonstrates creating SYN attack from many src to one server. -file: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] +*file*:: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] [source,python] ---- @@ -1278,7 +1377,7 @@ file: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] <1> Create SYN packet using Scapy <2> Define stream variable name=ip_src, 4 bytes size for IPv4. <3> Define stream variable name=src_port, 2 bytes size for port. -<4> Write ip_src var into `IP.src` packet offset. Scapy calculate the offset. We could gave `IP:1.src" for second IP header in the packet +<4> Write ip_src stream var into `IP.src` packet offset. Scapy calculate the offset. We could gave `IP:1.src" for second IP header in the packet <5> Fix IPv4 checksum. 
here we provide the header name `IP` we could gave `IP:1` for second IP <6> Update TCP src port- TCP checksum is not updated here @@ -1287,7 +1386,7 @@ WARNING: Original Scapy does not have the capability to calculate offset for a h The output pcap file field can be seen here .Pcap file output -[format="csv",cols="1^,2^,2^", options="header",width="40%"] +[format="csv",cols="1^,2<,2<", options="header",width="40%"] |================= pkt,Client IPv4,Client Port 1 , 17.152.71.218 , 5814 @@ -1301,10 +1400,10 @@ pkt,Client IPv4,Client Port ==== Tutorial: Field Engine, Tuple Generator -The following example demonstrates creating multiply flow from the same packet template. -The TupleGenerator instructions are used to create two stream variables with IP, port +The following example demonstrates creating multiply flows from the same packet template. +The TupleGenerator instructions are used to create two stream variables with IP, port see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] -file: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] +*file*:: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] [source,python] ---- @@ -1344,7 +1443,7 @@ pkt,Client IPv4,Client Port 6 , 16.0.0.2, 1027 |================= -* Number of clients are two 16.0.0.1 and 16.0.0.2 +* Number of clients are two. 16.0.0.1 and 16.0.0.2 * Number of flows is limited to 129020 (2*65535-1025) * The stream variable size should match the size of the FlowVarWr instruction @@ -1360,7 +1459,7 @@ In this example MPLS label field will be changed. 0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1| |==== -file: link:{github_stl_path}/udp_1pkt_mpls_vm.py[stl/udp_1pkt_mpls_vm.py] +*file*:: link:{github_stl_path}/udp_1pkt_mpls_vm.py[stl/udp_1pkt_mpls_vm.py] [source,python] ---- @@ -1402,7 +1501,7 @@ The way to do it is: 2. Trim the packet to the size you want 3. 
Update the packet fields to the new size -file: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] +*file*:: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] [source,python] ---- @@ -1448,12 +1547,10 @@ file: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] ==== Tutorial: New Scapy header -The following example demonstrates a way to use a header the is not supported by Scapy. -In this case this is VXLAN - - -file: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] +The following example demonstrates a way to use a header that is not supported by Scapy in default. +In this example we will show VXLAN support. +*file*:: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] [source,python] ---- @@ -1506,22 +1603,22 @@ For more information how to define headers see Scapy link:http://www.secdev.org/ ==== Tutorial: Field Engine, Many clients The following example demonstrates a way to generate traffic from many clients with different IP/MAC to one server. -The following figure demonstrate what e want to achieve +The following figure shows it. image::images/stl_tut_12.png[title="client->server",align="left",width=600, link="images/stl_tut_12.png"] -1. Send gratuitous ARP from B->D with server IP/MAC -2. DUT learn the ARP of Server IP/MAC +1. Send gratuitous ARP from B->D with server IP/MAC (58.55.1.1) +2. DUT learn the ARP of Server IP/MAC (58.55.1.1) 3. 
Send traffic from A->C with many Clients IP's/MAC's Let's take an example: Base source IPv4 : 55.55.1.1 -Destination IPv4: 58.0.0.1 +Destination IPv4: 58.55.1.1 Increment src ipt portion starting at 55.55.1.1 for 'n' number of clients (55.55.1.1, 55.55.1.2) Src MAC: start with 0000.dddd.0001, increment mac in steps of 1 -Dst MAC: Fixed - will be taken from trex_conf.yaml +Dst MAC: Fixed - 58.55.1.1 To send gratuitous ARP from TRex server side for this server (58.0.0.1) @@ -1529,17 +1626,17 @@ To send gratuitous ARP from TRex server side for this server (58.0.0.1) ---- def create_stream (self): # create a base packet and pad it to size - base_pkt = Ether(src="00:00:dd:dd:00:01", + base_pkt = Ether(src="00:00:dd:dd:01:01", dst="ff:ff:ff:ff:ff:ff")/ - ARP(psrc="58.0.0.1", - hwsrc="00:00:dd:dd:00:01", - hwdst="00:00:dd:dd:00:01", - pdst="58.0.0.1") + ARP(psrc="58.55.1.1", + hwsrc="00:00:dd:dd:01:01", + hwdst="00:00:dd:dd:01:01", + pdst="58.55.1.1") ---- Then traffic can be sent from client side A->C -file: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] +*file*:: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] [source,python] ---- @@ -1554,7 +1651,7 @@ class STLS1(object): # create a base packet and pad it to size size = self.fsize - 4; # no FCS base_pkt = Ether(src="00:00:dd:dd:00:01")/ - IP(src="55.55.1.1",dst="58.0.0.1")/UDP(dport=12,sport=1025) + IP(src="55.55.1.1",dst="58.55.1.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", @@ -1581,10 +1678,10 @@ class STLS1(object): ==== Tutorial: Field Engine, Split to core The following example demonstrates a way to split generated traffic to a number of threads. -Using this feature, there is a way to specify by each field to split the traffic to threads. +Using this feature, there is a way to specify by which field to split the traffic to threads. 
Without this feature the traffic is duplicated and all the threads transmits the same traffic. -===== Without Split +*Without Split*:: Let's assume we have two transmitters DP threads @@ -1630,7 +1727,7 @@ pkt, thread-0 ip_src,thread-1 ip_src * In this case all the threads transmit the same packets -===== With Split feature +*With Split feature enabled*:: Let's assume we have two transmitters DP threads @@ -1694,13 +1791,11 @@ pkt, thread-0 ip_src,thread-1 ip_src 6 , 55.55.0.6 , 55.55.58.158 |================= +*Some rules about Split stream varibles and burst/multi-burst*:: - -===== Some rules about Split stream varibles and burst/multi-burst - -* In case of burst/multi-burst the number of packets are split to number of threads in *default* there is no need an explict split -* When the number of packets in a burst is smaller than the number of threads only one thread will do the work. -* In case there is stream with burst of *1* packet, only the first DP thread will do the work. +* In case of burst/multi-burst the number of packets are split to number of threads in *default* there is no need an explict split it. +* When the number of packets in a burst is smaller than the number of threads only one thread will do the work. +* In case there is a stream with burst of *1* packet, only the first DP thread will do the work. ==== Tutorial: Field Engine, Split to core with Burst @@ -1708,14 +1803,13 @@ The following example demonstrates a way to split generated traffic to a number In both cases the number of packets would be split into threads. Using this feature, The Field engine will be split too. 
-===== Without Split +*Without Split*:: In this example: * Number of threads are two * Split is not configured - [source,python] ---- # no split @@ -1768,13 +1862,13 @@ pkt, thread-0 ip_src,thread-1 ip_src 10 , 16.0.0.10, 16.0.0.10 |================= -*The results:* +*The results*:: * Total packets are 20 as expected, 10 generated by each thread * Field engine is the same for both threads -===== With Split +*With Split feature enabled*:: [source,python] ---- @@ -1830,12 +1924,11 @@ pkt, thread-0 ip_src,thread-1 ip_src 10 , 16.0.0.10, 17.0.0.137 |================= -*The results:* +*The results*:: * Total packets are 20 as expected, 10 generated by each thread * Field engine is *not* the same for both threads. - ==== Tutorial: Field Engine, Null stream The following example demonstrates a way create a Stream with no packets. The use cases is to use the Null stream inter stream gap (ISG) and then go to a new stream. @@ -1864,7 +1957,7 @@ In the above figure we would like to that stream S3 will start on all the thread There is a way to load *one* packet data into a stream. There is an assumption that this pcap. only the first packet from this pcap is taken. -file: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] +*file*:: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] [source,python] ---- @@ -1878,7 +1971,7 @@ file: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] <1> packet is taken from pcap file relative to pwd of the script you run -file: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] +*file*:: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] [source,python] @@ -1896,7 +1989,7 @@ file: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relati The following example demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. 
-file: link:{github_stl_path}/pcap.py[pcap.py] +*file*:: link:{github_stl_path}/pcap.py[pcap.py] [source,python] ---- @@ -2040,7 +2133,7 @@ $./stl-sim -f stl/pcap.py --yaml The following example demonstrates a way to load pcap file to many stream and attach to each stream a Field Engine program. For example change the IP.src of all the streams to a random number -file: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] +*file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] [source,python] ---- @@ -2120,7 +2213,7 @@ pkt, IPv4 , flow The following example demonstrates creating IPv6 packet inside IPv4 packet and create a range of IPs -file: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] +*file*:: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] [source,python] ---- @@ -2438,7 +2531,7 @@ Under the hood there is a compiler that converts it to native scapy/field engine The support is limited to [TBD] this argument. -file: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] +*file*:: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] [source,python] ---- @@ -2492,13 +2585,65 @@ It can be converted to native json or YAML $ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --josn ---- -or converted to native Python native Scapy/FE using this command +or converted to native Python profile you can use this command [source,bash] ---- $ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --native ---- +.Auto generated code +[source,python] +---- +# !!! Auto-generated code !!! +from trex_stl_lib.api import * + +class STLS1(object): + def get_streams(self): + streams = [] + + packet = (Ether(src='00:00:01:00:00:01', dst='00:00:00:00:00:00', type=2048) / + IP(proto=17, chksum=5882, len=9202, ihl=5L, id=0) / + UDP(dport=12, sport=1025, len=9182, chksum=55174) / + Raw(load='!' 
* 9174)) + vm = CTRexScRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='inc', + init_value=64, min_value=64, max_value=9216, step=1), + CTRexVmDescTrimPktSize(fv_name='pkt_len'), + CTRexVmDescWrFlowVar(fv_name='pkt_len', + pkt_offset=16, add_val=-14, is_big=True), + CTRexVmDescWrFlowVar(fv_name='pkt_len', + pkt_offset=38, add_val=-34, is_big=True), + CTRexVmDescFixIpv4(offset=14)], split_by_field = 'pkt_len') + stream = STLStream(packet = CScapyTRexPktBuilder(pkt = packet, vm = vm), + mode = STLTXCont(pps = 1.0)) + streams.append(stream) + + packet = (Ether(src='00:00:01:00:00:01', dst='00:00:00:00:00:00', type=2048) / + IP(proto=17, chksum=5882, len=9202, ihl=5L, id=0) / + UDP(dport=12, sport=1025, len=9182, chksum=55174) / + Raw(load='!' * 9174)) + vm = CTRexScRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='dec', + init_value=9216, min_value=64, + max_value=9216, step=1), + CTRexVmDescTrimPktSize(fv_name='pkt_len'), + CTRexVmDescWrFlowVar(fv_name='pkt_len', pkt_offset=16, + add_val=-14, is_big=True), + CTRexVmDescWrFlowVar(fv_name='pkt_len', + pkt_offset=38, add_val=-34, is_big=True), + CTRexVmDescFixIpv4(offset=14)], split_by_field = 'pkt_len') + stream = STLStream(packet = CScapyTRexPktBuilder(pkt = packet, vm = vm), + mode = STLTXCont(pps = 1.0)) + streams.append(stream) + + return streams + +def register(): + return STLS1() +---- + + + + to run it using using the TRex Console [source,bash] diff --git a/waf.css b/waf.css index e9fa3f50..ba2fbb48 100755 --- a/waf.css +++ b/waf.css @@ -37,3 +37,7 @@ body, div.sectionbody, div#toctitle { font-family: 'Lucida Grande', Verdana, Arial, sans-serif; } +.monospaced, code, pre { + font-family: Consolas, 'Liberation Mono', Menlo, Courier, monospace; +} + -- cgit 1.2.3-korg From 5a6d8209917ce65d7cf6c93fe9397cd17c27d9fe Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 15:43:59 +0200 Subject: minors --- draft_trex_stateless.asciidoc | 58 +++++++++++++++++++------------------------ 1 file changed, 26 
insertions(+), 32 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 15fa7136..0a6f7d3c 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1953,9 +1953,11 @@ In some cases there is a need to split the streams to thread in a way that speci In the above figure we would like to that stream S3 will start on all the thread after S2 was finished by all the threads -==== Tutorial: Pcap file to *one* stream +==== Tutorial: Pcap file to one stream -There is a way to load *one* packet data into a stream. There is an assumption that this pcap. only the first packet from this pcap is taken. +*Goal*:: Load stream template packet from pcap file instaed of scapy. + +There is an assumption that this pcap has one packet. In case it has more only the first packet is loaded. *file*:: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] @@ -1985,9 +1987,9 @@ There is a way to load *one* packet data into a stream. There is an assumption ---- <1> packet is taken from pcap file relative to *profile* file location -==== Tutorial: Pcap file to many streams +==== Tutorial: Pcap file conversion to many streams -The following example demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. +*Goal*:: Demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. the ISG for each stream is the inter packet gap (IPG) *file*:: link:{github_stl_path}/pcap.py[pcap.py] @@ -2003,16 +2005,15 @@ The following example demonstrates a way to load pcap with *number* of packets a ---- <1> The inter stream gap in usec <2> How many times to loop -<3> the pcap file - +<3> The input pcap file image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width=300, link="images/stl_tut_pcap_file1.png"] -This figure illustrates how the streams look like for pcap file with 3 packet. 
+This figure illustrates how the streams look like for pcap file with 3 packets. * Each stream is configured to burst with one packet * Each stream point to the next stream. * The last stream point to the first with action_loop=loop_count in case it was asked (>1) -The profile will run on only one DP thread because it has burst with one packet (see Split example) +The profile will run on one DP thread because it has burst with one packet (Split can work in this case) Running this example @@ -2122,11 +2123,10 @@ $./stl-sim -f stl/pcap.py --yaml instructions: [] split_by_var: '' ---- -<1> each stream point to the next stream -<2> last point to the first -<3> the number of loop is given in `action_count: 1` -<4> self_start is disabled for all the streams except the first one - +<1> Each stream point to the next stream +<2> Last point to the first +<3> The number of loop is given in `action_count: 1` +<4> Self_start is disabled for all the streams except the first one ==== Tutorial: Pcap file to many streams and Field Engine @@ -2246,7 +2246,7 @@ The following example demonstrates creating IPv6 packet inside IPv4 packet and c ==== Tutorial: Mask instruction -The STLVmWrMaskFlowVar is a handy command. The pseudocode is a folow +The STLVmWrMaskFlowVar is a handy instruction. The pseudocode is as follows: .Pseudocode [source,bash] @@ -2268,7 +2268,7 @@ The STLVmWrMaskFlowVar is a handy command. The pseudocode is a folow ---- -===== Example 1 +*Example 1*:: [source,python] ---- @@ -2287,7 +2287,7 @@ The STLVmWrMaskFlowVar is a handy command. 
The pseudocode is a folow This will cast stream variable with 2 byte to be 1 byte -===== Example 2 +*Example 2*:: [source,python] ---- @@ -2316,7 +2316,7 @@ The output will be shift by 8 0x0300 |================= -===== Example 3 +*Example 3*:: [source,python] ---- @@ -2350,11 +2350,9 @@ value 0x01 |================= - ==== Tutorial: Advance traffic profile - platform [TODO] - -===== Direction +*Direction*:: To make the traffic profile more usable, the traffic profile support per direction/interface. @@ -2391,7 +2389,7 @@ interfaces 1/3 is direction 1 So rate will be changed accordingly. -===== Per Interface +*Per Interface*:: In this case there is a different profile base on interface ID @@ -2453,7 +2451,7 @@ def create_streams (self, direction = 0, **args): The Console will give the port/direction and will get the right stream in each interface -===== Tunable +*Tunable*:: [source,python] ---- @@ -2523,13 +2521,12 @@ class STLS1(object): -=== Tutorials HLT profile +==== Tutorial: HLT traffic profile -HLTAPI is a Cisco standard API for traffic generation.IXIA and Spirent support this standard. traffic_config API has set of arguments for specifying the packet, how to send it and what field to change while sending it. -We created a Python module that you can specify the traffic profile in HLT like format and load it as native profile for smooth transition . +traffic_config API has set of arguments for specifying stream. In particular the packet template and which field and how to send it. +It is possible to define a traffic profile using HTTAPI arguments . Under the hood there is a compiler that converts it to native scapy/field engine instruction -The support is limited to [TBD] this argument. - +The support is limited, see xref:altapi-support[here]. 
*file*:: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] @@ -2642,8 +2639,6 @@ def register(): ---- - - to run it using using the TRex Console [source,bash] @@ -2651,7 +2646,6 @@ to run it using using the TRex Console TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a ---- - more profiles and example can be found in `stl/hlt` folder @@ -2698,7 +2692,7 @@ Some guidelines: | WORK (pause) -> PAUSE (resume )--- | | | | - ------------------------------------ + -------------------------------------- ----- @@ -3040,7 +3034,7 @@ get keyboard === Appendix -==== HLT supported Arguments +==== HLT supported Arguments anchor:altapi-support[] [source,python] -- cgit 1.2.3-korg From 369374a52d19947205e7e56fbfe8ea12431192d5 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 15:56:15 +0200 Subject: remove trex mock from doc --- trex_rpc_server_spec.asciidoc | 37 ++++++++----------------------------- 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 716193fc..8d8bcc14 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -41,6 +41,11 @@ The TRex RPC Server |================= +== Audience of this document + +* Anyone that wants to understand the low level protocol to TRex server +* GUI developer that want to develop a GUI for TRex Server + == RPC Support On TRex TRex implements a RPC protocol in order to config, view and @@ -95,33 +100,8 @@ and perform adjustments to the request. TRex also provides a Python based console that can connect to the server (mock or real) and send various commands to the server. 
-==== Building The Mock Server -Building the mock server is performed like this: -[source,bash] ----- -trex-core/linux> ./b configure -trex-core/linux> ./b --target=mock-rpc-server-64-debug ----- - -==== Running The Mock Server -Launching the mock server is performed like this: - -[source,bash] ----- - -trex-core/scripts> ./mock-rpc-server-64-debug - --= Starting RPC Server Mock =- - -Listening on tcp://localhost:5050 [ZMQ] - -Setting Server To Full Verbose - -Server Started - ----- - ==== Using The TRex Console To Interact + When the mock server is up, you can already send commands to the server. {zwsp} + {zwsp} + @@ -132,7 +112,7 @@ Let's demonstrate the operation with the Python based TRex console: [source,bash] ---- -trex-core/scripts> ./trex-console +trex-core/scripts>./trex-console Connecting To RPC Server On tcp://localhost:5050 [SUCCESS] @@ -188,9 +168,8 @@ On the 'server' side: [source,bash] ---- -trex-core/scripts> ./mock-rpc-server-64-debug +trex-core/scripts>./t-rex-64 -i --= Starting RPC Server Mock =- Listening on tcp://localhost:5050 [ZMQ] -- cgit 1.2.3-korg From 6a81f64e18532fcef5a79058cd62b3b3418868a5 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 15:59:02 +0200 Subject: remove obsolete stuf --- trex_rpc_server_spec.asciidoc | 195 ------------------------------------------ 1 file changed, 195 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 8d8bcc14..0fd056bd 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1253,201 +1253,6 @@ Describes the log off from the machine * *release* - release the ownership over the device -== Higher Level implementation examples -The following examples represents common scenarios implemented by a higher layer, which uses the API described above. - -The examples are written in Python, however similar examples can be shown in any programming language. 
- -=== CTRexPktBuilder class description -`CTRexPktBuilder` is a Python module designed to provide a progammer API for dynamic packet building. -Since the packet is built to be used by TRex, a `CTRexVM` subclass has been created to describe how TRex should use the described packet in its transmission. - -While the entire `CTRexPktBuilder` class (which is initialized by specifying the total length of the packet) responsible to both building the packet layer by layer, the `CTRexVM` class is responsible for controlling the ranging of the values as desribed in the <>, and other attributes being used by TRex data-plane once the server receives its streams. - - -=== Creating an example packet -The following conde snippet describes how an ICMP Echo packet is built. - -[source, python, numbered] ----- -from packet_builder import CTRexPktBuilder -import dpkt - -pkt_bld = CTRexPktBuilder() # <1> -pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet()) -# set Ethernet layer attributes -pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3") -pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22") -pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP) -# set IP layer attributes -pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP()) -pkt_bld.set_ip_layer_addr("l3_ip", "src", "21.0.0.2") -pkt_bld.set_ip_layer_addr("l3_ip", "dst", "22.0.0.12") -pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_ICMP) -# set ICMP layer attributes -pkt_bld.add_pkt_layer("icmp", dpkt.icmp.ICMP()) -pkt_bld.set_layer_attr("icmp", "type", dpkt.icmp.ICMP_ECHO) -# set Echo(ICMP) layer attributes -pkt_bld.add_pkt_layer("icmp_echo", dpkt.icmp.ICMP.Echo()) -pkt_bld.set_layer_attr("icmp_echo", "id", 24528) -pkt_bld.set_layer_attr("icmp_echo", "seq", 11482) -pkt_bld.set_pkt_payload('hello world') -# finally, set IP header len with relation to payload data -pkt_bld.set_layer_attr("l3_ip", "len", len(pkt_bld.get_layer('l3_ip'))) ----- - -<1> Initialize the packet builder instance. 
- -This example created a packet without any ranging to it, so in this case TRex is expected to reply the same packet over and over without any changes to it. - -When adding sending this packet as part of the <<_add_stream, Add Stream>> command, the packet content specified under `packet` would look for the created ICMP packet like this: - -[source, python] ----- ->>> print pkt_bld.dump_pkt() - [224, 95, 185, 105, 233, 34, 0, 21, 23, 167, 117, 163, 8, 0, 69, 0, 0, 39, - 0, 0, 0, 0, 64, 1, 79, 201, 21, 0, 0, 2, 22, 0, 0, 12, 8, 0, 217, 134, 95, - 208, 44, 218, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100] ----- - -Each of the array items representing a byte data-representation, hence ranging from 0 to 255. - -=== Create a packet with single ranging instruction -The following example creates an HTTP GET packet, hence layering Ethernet/IP/TCP/HTTP. - -[source, python, numbered] ----- -from packet_builder import CTRexPktBuilder -import dpkt - -pkt_bld = CTRexPktBuilder() -pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet()) -# set Ethernet layer attributes -pkt_bld.set_eth_layer_addr("l2", "src", "00:15:17:a7:75:a3") -pkt_bld.set_eth_layer_addr("l2", "dst", "e0:5f:b9:69:e9:22") -pkt_bld.set_layer_attr("l2", "type", dpkt.ethernet.ETH_TYPE_IP) -# set IP layer attributes -pkt_bld.add_pkt_layer("l3_ip", dpkt.ip.IP()) -pkt_bld.set_ip_layer_addr("l3_ip", "src", "21.0.0.2") -pkt_bld.set_ip_layer_addr("l3_ip", "dst", "22.0.0.12") -pkt_bld.set_layer_attr("l3_ip", "p", dpkt.ip.IP_PROTO_TCP) -# set TCP layer attributes -pkt_bld.add_pkt_layer("l4_tcp", dpkt.tcp.TCP()) -pkt_bld.set_layer_attr("l4_tcp", "sport", 13311) -pkt_bld.set_layer_attr("l4_tcp", "dport", 80) -pkt_bld.set_layer_attr("l4_tcp", "flags", 0) -pkt_bld.set_layer_attr("l4_tcp", "win", 32768) -pkt_bld.set_layer_attr("l4_tcp", "seq", 0) -# set packet payload, for example HTTP GET request -pkt_bld.set_pkt_payload('GET /10k_60k HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 
(compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n') - -# finally, set IP header len with relation to payload data -pkt_bld.set_layer_attr("l3_ip", "len", len(pkt_bld.get_layer('l3_ip'))) ----- - -Now, we extened the single packet created with three VM instructions, in order to range over the source IP of the packet. - -[source, python, numbered] ----- -pkt_bld.set_vm_ip_range(ip_layer_name="l3_ip", # <1> - ip_field="src", # <2> - ip_init="10.0.0.1", ip_start="10.0.0.1", ip_end="10.0.0.255", - add_value=1, - operation="inc") ----- - -<1> `l3_ip` corresponds with the layer name given to the IP layer of the packet. This helps identifying and diffrenciate in packet containing more than one IP header. - -<2> the name of the field on which we want to range. - -Now, we added ranging for source IP starting from 10.0.0.1 to 10.0.0.255. -This will generate the follwing VM instructions, which will be provided under `vm` field of the <<_add_stream, add_stream>> command: - -[source, python] ----- ->>> print pkt_bld.vm.dump(), - [{'name': 'l3__src', 'ins_name': 'flow_var', 'max_value': '167772415', 'min_value': '167772161', 'init_value': '167772161', 'size': 4, 'op': 'inc'}, - {'is_big_endian': False, 'pkt_offset': 26, 'type': 'write_flow_var', 'name': 'l3__src', 'add_value': 1}, - {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}] ----- - -As we can see, three instructions has been generated for this ranging criteria: - -1. `flow_var` instruction - for defining the ranging parameters. - -2. `write_flow_var` instruction - for specifying where and how the modification should take place. - -3. `fix_checksum_ipv4` instruction - for updated the checksum field - -[WARNING] -The order of the instruction **does matter**. 
In this example, if the `fix_checksum_ipv4` instruction would have been places prior to the `write_flow_var` instruction, the generated packet would have satyed with the old checksum values. - -[NOTE] -By default, with each change to the IP header, a `fix_checksum_ipv4` instruction is added. This can be canceled by passing `add_checksum_inst=False` in functions which ranges over an IP field. - - -=== Create a packet with multiple ranging instructions -Now, we shall extend our ranging and add another field to range on, this time we'll pick the TOS field of the IP header. - -So, we'll add the following code snippet **ontop of the ranging method we already applied**: - -[source, python, numbered] ----- -pkt_bld.set_vm_custom_range(layer_name="l3_ip", - hdr_field="tos", - init_val="10", start_val="10", end_val="200", add_val=2, val_size=1, - operation="inc") ----- - -So, in this case we chose to range the TOS field from 10 to 200 in steps of 2. - -Finally, let's see the expected JSON output of the VM instructions: - -[source, python] ----- ->>> print pkt_bld.vm.dump() - [{ 'init_value': '167772161', # <1> - 'ins_name': 'flow_var', - 'max_value': '167772415', - 'min_value': '167772161', - 'name': 'l3__src', - 'op': 'inc', - 'size': 4}, - { 'init_value': '10', # <2> - 'ins_name': 'flow_var', - 'max_value': '200', - 'min_value': '10', - 'name': 'l3__tos', - 'op': 'inc', - 'size': 1}, - { 'add_value': 2, # <3> - 'is_big_endian': False, - 'name': 'l3__tos', - 'pkt_offset': 15, - 'type': 'write_flow_var'}, - { 'add_value': 1, # <4> - 'is_big_endian': False, - 'name': 'l3__src', - 'pkt_offset': 26, - 'type': 'write_flow_var'}, - { 'pkt_offset': 14, 'type': 'fix_checksum_ipv4'} # <5> - ] ----- - -<1> `flow_var` instruction for source IP. - -<2> `flow_var` instruction for TOS field - -<3> `write_flow_var` instruction for TOS. - -<4> `write_flow_var` instruction for source IP. 
- -<5> `fix_checksum_ipv4` instruction for both ranging options - -[NOTE] -In this case only one checksum instruction has been generated, since both ranging options applies to the same IP header. - - :numbered!: [appendix] -- cgit 1.2.3-korg From 0f1b4647ff13d4b1c27ce16105548f0bcb72d5cc Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 15:59:58 +0200 Subject: minor --- trex_rpc_server_spec.asciidoc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 0fd056bd..d23a8969 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -43,8 +43,7 @@ The TRex RPC Server == Audience of this document -* Anyone that wants to understand the low level protocol to TRex server -* GUI developer that want to develop a GUI for TRex Server +Anyone that wants to understand the low level protocol to TRex server. for example a GUI developer that wants to develop a GUI for TRex Server. == RPC Support On TRex -- cgit 1.2.3-korg From 0e59ebd502916bbe48fe2158c86d4c1a069789ff Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 9 Mar 2016 16:23:25 +0200 Subject: fix pdf for stateless --- draft_trex_stateless.asciidoc | 57 +++++++++++++++++++-------------- trex_rpc_server_spec.asciidoc | 73 ++----------------------------------------- 2 files changed, 35 insertions(+), 95 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 8884a89c..9174729b 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -11,6 +11,16 @@ TRex Stateless support :github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples :toclevels: 6 +ifdef::backend-docbook[] +:p_width: 450 +:p_width_1: 200 +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +:p_width: 800 +:p_width_1: 400 +endif::backend-xhtml11[] + == Stateless support (Alpha stage) @@ -44,7 
+54,7 @@ TRex Stateless support ==== Traffic profile example -image::images/stl_streams_example.png[title="Streams example",align="left",width=600, link="images/stl_streams_example.png"] +image::images/stl_streams_example.png[title="Streams example",align="left",width={p_width}, link="images/stl_streams_example.png"] ==== High level functionality - near future @@ -89,7 +99,7 @@ To support interactive mode, JSON-RPC2 thread added to the TRex Control Plane co The following diagram illustrates the RPC server/client components -image::images/trex_2_stateless.png[title="RPC Server Position",align="left",width=800, link="images/trex_2_stateless.png"] +image::images/trex_2_stateless.png[title="RPC Server Position",align="left",width={p_width}, link="images/trex_2_stateless.png"] * The Control transport protocol is ZMQ working in REQ/RES mode * JSON-RPC2 is the RPC protocol on top of the ZMQ REQ/RES @@ -103,7 +113,7 @@ image::images/trex_2_stateless.png[title="RPC Server Position",align="left",widt * In case of crash/exit of the Client it should sync again at connection time. * Client has the ability to get a statistic in real time (with ASYNC ZMQ). It gives the option to have number of ways to look into the statistics (GUI and Console) at the same time. 
-image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width=800, link="images/trex_stateless_multi_user.png"] +image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] For more detailed see RPC specification link:trex_rpc_server_spec.html[here] @@ -115,7 +125,7 @@ This Architecture provides the following advantages: === TRex Entities -image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600, link="images/stateless_objects.png"] +image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_width_1}, link="images/stateless_objects.png"] * *TRex*: Each TRex instance includes a number of interfaces * *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) @@ -131,7 +141,7 @@ image::images/stateless_objects.png[title="TRex Objects ",align="left",width=600 ==== TRex package folders -[cols="1,5", options="header",width="80%"] +[cols="5,5", options="header",width="100%"] |============================= | Location | Description | / | t-rex-64/dpdk_set_ports/stl-sim @@ -394,7 +404,7 @@ In case a user configures a source or destination MAC explicitly this MAC will t .MAC addrees -[format="csv",cols="2^,2^,2^", options="header",width="50%"] +[format="csv",cols="2^,2^,2^", options="header",width="100%"] |================= Scapy , Source MAC,Destination MAC Ether() , trex_cfg (src),trex_cfg(dst) @@ -736,6 +746,7 @@ if __name__ == "__main__": <2> import HLT TRex + ==== Tutorial: Simple IPv4/UDP packet - Simulator @@ -832,7 +843,7 @@ $ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 The following figure presents the output pcap file -image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width=800, link="images/stl_tut_1.png.png"] +image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width={p_width}, 
link="images/stl_tut_1.png.png"] .To look into the JSON command to the server [source,bash] @@ -1071,7 +1082,7 @@ The following example demonstrates 3 streams with different rates (pps=10,20,40) The output:: The folowing figure present the output -image::images/stl_inter.png[title="Interleave streams",align="left",width=600, link="images/stl_inter.png"] +image::images/stl_inter.png[title="Interleave streams",align="left",width={p_width}, link="images/stl_inter.png"] Discussion:: @@ -1200,7 +1211,7 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 <2> Multi burst of 5 bursts of 4 packets with a inter burst gap of one second -image::images/stl_tut_4.png[title="Streams example",align="left",width=600, link="images/stl_tut_4.png"] +image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width}, link="images/stl_tut_4.png"] ==== Tutorial: Loops of streams @@ -1605,7 +1616,7 @@ For more information how to define headers see Scapy link:http://www.secdev.org/ The following example demonstrates a way to generate traffic from many clients with different IP/MAC to one server. The following figure shows it. -image::images/stl_tut_12.png[title="client->server",align="left",width=600, link="images/stl_tut_12.png"] +image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] 1. Send gratuitous ARP from B->D with server IP/MAC (58.55.1.1) 2. DUT learn the ARP of Server IP/MAC (58.55.1.1) @@ -1934,7 +1945,7 @@ pkt, thread-0 ip_src,thread-1 ip_src The following example demonstrates a way create a Stream with no packets. The use cases is to use the Null stream inter stream gap (ISG) and then go to a new stream. using this you can create loops like this: -image::images/stl_null_stream.png[title="Null Stream",align="left",width=600, link="images/stl_null_stream.png"] +image::images/stl_null_stream.png[title="Null Stream",align="left",width={p_width}, link="images/stl_null_stream.png"] 1. 
S1 - send_burst of packets, go to stream NULL 2. NULL - wait ISG time - go to S1 @@ -1947,7 +1958,7 @@ Null stream is with configured with ==== Tutorial: Field Engine, Barrier stream (Split) - [TODO] -image::images/stl_barrier.png[title="Barrier Stream",align="left",width=600, link="images/stl_barrier.png"] +image::images/stl_barrier.png[title="Barrier Stream",align="left",width={p_width}, link="images/stl_barrier.png"] In some cases there is a need to split the streams to thread in a way that specific stream will continue only after all the threads pass the same path. In the above figure we would like to that stream S3 will start on all the thread after S2 was finished by all the threads @@ -2007,7 +2018,7 @@ There is an assumption that this pcap has one packet. In case it has more only t <2> How many times to loop <3> The input pcap file -image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width=300, link="images/stl_tut_pcap_file1.png"] +image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width={p_width}, link="images/stl_tut_pcap_file1.png"] This figure illustrates how the streams look like for pcap file with 3 packets. * Each stream is configured to burst with one packet @@ -2488,20 +2499,18 @@ In this example, change the fsize to 1500 bytes * Per stream statistics is implemented using hardware assist when possible (X710/XL710 Intel NICs flow director rules for example). * With other NICs (Intel I350, 82599) it is implemented in software. * Implementation works as follows: -1. User chooses 32 bit packet group id (pg_id). -1. IPv4 Identification field of the stream is changed to a value with in a reserved range (0xff00 to 0xffff). Notice that if a stream for which -no statistics is needed has IPv4 Identification in the reserved range, it is changed (left bit becomes 0). -1. In the software implementation, hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. 
-In the hardware implementation, HW rules are inserted to count packets from relevant streams. -1. Summed up statistics (per stream, per port) are sent using ZMQ async channel to clients. +** User chooses 32 bit packet group id (pg_id). +** IPv4 Identification field of the stream is changed to a value with in a reserved range (0xff00 to 0xffff). Notice that if a stream for which no statistics is needed has IPv4 Identification in the reserved range, it is changed (left bit becomes 0). -* Limitations: +* In the software implementation, hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. In the hardware implementation, HW rules are inserted to count packets from relevant streams. +* Summed up statistics (per stream, per port) are sent using ZMQ async channel to clients. -1. Currently, the feature supports only two packet types: -a. IPv4 over ethernet -b. IPv4 with one vlan tag -2. Number of concurrent streams you can get statistics for is 128. +*Limitations*:: +* Currently, the feature supports only two packet types: +** IPv4 over ethernet +** IPv4 with one vlan tag +* Number of concurrent streams you can get statistics for is 128. [source,python] ---- diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index d23a8969..a073c6c3 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -91,45 +91,9 @@ http://www.jsonrpc.org/specification Later on in the document we will describe all the supported commands. -=== TRex RPC Mock Server -Before we get into the commands, it's worth mentioning that TRex has a mock RPC server -designed to allow playing around with the server in order to understand the response -and perform adjustments to the request. +=== TRex Console -TRex also provides a Python based console that can connect to the server (mock or real) and -send various commands to the server. 
- -==== Using The TRex Console To Interact - -When the mock server is up, you can already send commands to the server. -{zwsp} + -{zwsp} + - -Let's demonstrate the operation with the Python based TRex console: - -{zwsp} + - -[source,bash] ----- -trex-core/scripts>./trex-console - -Connecting To RPC Server On tcp://localhost:5050 -[SUCCESS] - - --=TRex Console V1.0=- - -Type 'help' or '?' for supported actions - -TRex > - ----- -As we will see later on, a basic RPC command supported by the server is 'ping'. -{zwsp} + -Let's issue a ping command to the server and see what happens on both sides: - -{zwsp} + -{zwsp} + +To debug RPC it is possible to enable verbose command from Console see link:draft_trex_stateless.html#_console_commands[here] On the 'client' side: @@ -161,39 +125,6 @@ TRex > ping [SUCCESS] ----- -On the 'server' side: - -[source,bash] ----- - -trex-core/scripts>./t-rex-64 -i - - -Listening on tcp://localhost:5050 [ZMQ] - -Setting Server To Full Verbose - -Server Started - - -[verbose][req resp] Server Received: - -{ - "id" : "maa5a3g1", - "jsonrpc" : "2.0", - "method" : "ping", - "params" : null -} - -[verbose][req resp] Server Replied: - -{ - "id" : "maa5a3g1", - "jsonrpc" : "2.0", - "result" : {} -} - ---- == RPC Server Component Position Illustration -- cgit 1.2.3-korg From 7df19261dedad2840ebd9a58a75df96a1d7acd9c Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Wed, 9 Mar 2016 16:32:29 +0200 Subject: trex_book: QSFP+ both copper and optical supported --- trex_book.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 3c7b30fd..ed6d8b37 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -107,7 +107,7 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw | Bandwidth | Chipset | Example | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC | 10Gb/sec | Intel 82599 | Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb 
SFP+ Adapter -| 40Gb/sec | Intel XL710 Intel X710 | QSFP+, SFP+ +| 40Gb/sec | Intel XL710 Intel X710 | QSFP+ (copper/optical), SFP+ | VMXNET / + VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox -- cgit 1.2.3-korg From 0ffc745ac18db40a236162058f6eb2e2b87e3de4 Mon Sep 17 00:00:00 2001 From: beubanks Date: Wed, 9 Mar 2016 17:25:29 -0500 Subject: Minor correction to Highl level functionality - near future list. --- draft_trex_stateless.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 9174729b..dd957eaf 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -58,7 +58,7 @@ image::images/stl_streams_example.png[title="Streams example",align="left",width ==== High level functionality - near future -* ARP emulation - learn server MAC. Support unlimited of MAC addresses per port +* ARP emulation - learn server MAC. Support unlimited MAC addresses per port. ==== High level functionality - roadmap -- cgit 1.2.3-korg From 852e9556cbabcaa645e5def016ec47e942c35744 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 10 Mar 2016 17:32:30 +0200 Subject: add stateless vs stateful --- draft_trex_stateless.asciidoc | 68 ++++++++++++++++++++++++++++++++++++++----- wscript | 2 +- 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 9174729b..c10c8a00 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -35,7 +35,7 @@ endif::backend-xhtml11[] ** Field engine program *** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 *** Ability to change the packet size (e.g. Random packet size 64-9K) -** Mode -Continues/Burst/Multi burst support +** Mode - Continuous/Burst/Multi burst support ** Rate can be specified in: *** Packet per second -(e.g. 
14MPPS) *** L1 bandwidth (e.g. 500Mb/sec) @@ -78,7 +78,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo | Multi stream | 255 | [green]*Unlimited* | | Packet build flexibility | Limited | [green]*Scapy- Ulimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols | Packet Field engine | limited | [green]*Unlimited* | -| Tx Mode | Continues/Burst/Multi burst | Continues/Burst/Multi burst| +| Tx Mode | Continuous/Burst/Multi burst | Continuous/Burst/Multi burst| | ARP Emulation | Yes | Not yet - workaround | | Automation | TCL/Python wrapper to TCL | [green]*native Python/Scapy* | | Automation speed sec| 30sec | [green]*1msec* | test of load/start/stop/get counters @@ -133,13 +133,37 @@ image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_ * *Stream*: Each stream includes ** *Packet*: Packet template up to 9K bytes ** *Field Engine*: which field to change, do we want to change packet size -** *Mode*: how to send the packet. Continues/Burst/Multi Burst +** *Mode*: how to send the packet. Continuous/Burst/Multi Burst ** *Rx Stats* Which Statstistic to collect for each stream ** *Rate*: Specified in Packet Per Second (pps) or bandwidth (bps) -** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continues mode +** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continuous mode -==== TRex package folders +=== Statful vs Stateless + +TRex Stateless support is basic L2/L3 tests more for Switch/Router. +With Stateless it is possible to define a Stream that has a *one* packet template, define a program to change any fields in the packet and run it in continues/burst/multi-burst mode. +With Statless you *can't* learn NAT translation because there is no context of flow/client/server. In Stateful the basic building block is a flow/application (That compose from many packets). 
+However, Using Stateless mode, it is much more flexible as you can define any type of packets and build simple program and in a way you can mimic Stateful but not everything. +For example, you can load a pcap with the number of packets as a link of streams +a->b->c->d-> back to a +And create a program for each stream to change src_ip=10. 0.0.1-10.0.0.254 this will create something similar to Stateful but the underline is totally different. +If you are confused you probably need Stateless. + +.Statful vs Stateless +[cols="1^,3^,3^", options="header"] +|================= +| Feature | Stateless |Statful +| Flow base | No | Yes +| NAT | No | Yes +| Tunnel | Yes | Only specific +| L7 App emulation | No | Yes +| Any type of packet | Yes | No +| Latency Jitter | Per Stream | Global/Per flow +|================= + + +=== TRex package folders [cols="5,5", options="header",width="100%"] |============================= @@ -203,7 +227,7 @@ def register(): return STLS1() ---- <1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'(0x78) .See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] -<2> Mode is Continues with a rate of 1 pps (default rate is 1 PPS) +<2> Mode is Continuous with a rate of 1 pps (default rate is 1 PPS) <3> get_streams function is mandatory <4> Each Traffic profile module should have a `register` function @@ -796,7 +820,7 @@ def register(): return STLS1() ---- <1> Define the packet, in this case it IP/UDP with 10 bytes of 'x' -<2> Mode is Continues with rate of 1 PPS (default rate is 1 PPS) +<2> Mode is Continuous with rate of 1 PPS (default rate is 1 PPS) <3> Each Traffic profile module should have a `register` function Now let try to run it throw TRex simulator limiting the number of packets to 10 @@ -3048,6 +3072,36 @@ get keyboard === Appendix +==== Scapy packet examples + +[source,python] +---- + +# udp header +Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + +# UDP over one valn 
+Ether()/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + +# UDP QinQ +Ether()/Dot1Q(vlan=12)/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) + +#TCP over IP ove VALN +Ether()/Dot1Q(vlan=12)/IP(src="16.0.0.1",dst="48.0.0.1")/TCP(dport=12,sport=1025) + +# IPv6 over valn +Ether()/Dot1Q(vlan=12)/IPv6(src="::5")/TCP(dport=12,sport=1025) + +#Ipv6 over UDP over IP +Ether()/IP()/UDP()/IPv6(src="::5")/TCP(dport=12,sport=1025) + +#DNS packet +Ether()/IP()/UDP()/DNS() + +#HTTP packet +Ether()/IP()/TCP()/"GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n" +---- + ==== HLT supported Arguments anchor:altapi-support[] diff --git a/wscript b/wscript index f7e5024d..c7823d1c 100755 --- a/wscript +++ b/wscript @@ -179,7 +179,7 @@ def build(bld): source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.html', scan=ascii_doc_scan) + source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless1.html', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book, source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 3aeaf856c9f1d1f081d2d179343cbe156929ecd2 Mon Sep 17 00:00:00 2001 From: beubanks Date: Thu, 10 Mar 2016 14:18:10 -0500 Subject: Document cleanup edits --- draft_trex_stateless.asciidoc | 84 +++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index dd957eaf..0a3eed5c 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -76,7 +76,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. 
The fo | Feature | IXExplorer |TRex | Description | Line rate | Yes |Almost ~15MPPS/core| | Multi stream | 255 | [green]*Unlimited* | -| Packet build flexibility | Limited | [green]*Scapy- Ulimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols +| Packet build flexibility | Limited | [green]*Scapy- Unlimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols | Packet Field engine | limited | [green]*Unlimited* | | Tx Mode | Continues/Burst/Multi burst | Continues/Burst/Multi burst| | ARP Emulation | Yes | Not yet - workaround | @@ -95,7 +95,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo === RPC Architecture -To support interactive mode, JSON-RPC2 thread added to the TRex Control Plane core. +To support interactive mode, a JSON-RPC2 thread is added to the TRex Control Plane core. The following diagram illustrates the RPC server/client components @@ -103,15 +103,15 @@ image::images/trex_2_stateless.png[title="RPC Server Position",align="left",widt * The Control transport protocol is ZMQ working in REQ/RES mode * JSON-RPC2 is the RPC protocol on top of the ZMQ REQ/RES -* Async transport is ZMQ working SUB/PUB mode. It is for async event such as interface change mode, counters etc. +* Async transport is ZMQ working SUB/PUB mode. It is for async events such as interface change mode, counters etc. * Python is the first Client to implement the Python automation API * Console utilizes the Python API to implement a user interface to TRex -* Number of users can control one TRex server in parallel as long as they control different Interfaces. TRex Interface can be acquired by a user. For example a TRex with four ports can be used by two users. User A can acquire Interface 0/ 1 and User B can acquire Interface 3/4 -* There could be only *one* control Console/GUI (R/W) entity for a specific user. User A with two interfaces could have only one R/W Control session in specific time. 
By that we can cache the TRex Server interface information in the Client. -* For one user there could be many read-only clients for getting statistics. -* Client should sync with the server to get the state in connection time and cache the server information locally once the state was changed +* Multiple users can control one TRex server in parallel as long as they control different Interfaces. Individuqal TRex Interfaces can be acquired by a user. For example, a TRex with four ports can be used by two users. User A can acquire Interfaces 0 & 1 and User B can acquire Interfaces 2 & 3. +* There can be only *one* control Console/GUI (R/W) entity for a specific user. User A with two interfaces can have only one R/W Control session active at a specific time. By that we can cache the TRex Server interface information in the Client. +* For one user there can be many read-only clients for getting statistics. +* Client should sync with the server to get the state at connection time and cache the server information locally once the state was changed * In case of crash/exit of the Client it should sync again at connection time. -* Client has the ability to get a statistic in real time (with ASYNC ZMQ). It gives the option to have number of ways to look into the statistics (GUI and Console) at the same time. +* The Client has the ability to get a statistic in real time (with ASYNC ZMQ). This provides the option to have multiple ways to look into the statistics (GUI and Console) at the same time. image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] @@ -119,7 +119,7 @@ For more detailed see RPC specification link:trex_rpc_server_spec.html[here] This Architecture provides the following advantages: -* Fast interaction with TRex server. very fast load/start/stop profiles to an interface (~2000 cycles/sec for load/start/stop profile) +* Fast interaction with TRex server. 
For example, very fast load/start/stop profiles to an interface (~2000 cycles/sec for load/start/stop profile) * Leveraging Python/Scapy for building a packet/Field engine * HLTAPI compiler complexity is done in Python @@ -127,14 +127,14 @@ This Architecture provides the following advantages: image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_width_1}, link="images/stateless_objects.png"] -* *TRex*: Each TRex instance includes a number of interfaces -* *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) -* *Traffic profile*: Each traffic profile includes a number of streams. This is the basic building block of activation. It is possible to add/remove to an interface a profile while other profile already exists. A profile can be looked as a "program" with dependency between streams. It is not possible to change a profile while it is running except changing the rates. -* *Stream*: Each stream includes +* *TRex*: Each TRex instance includes a number of interfaces +* *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) +* *Traffic profile*: Each traffic profile includes a number of streams. This is the basic building block of activation. It is possible to add/remove traffic profiles on an interface while other traffic profiles are active on the interface. A profile can be looked as a "program" with dependency between it's streams. It is not possible to change a profile while it is running except for changing the rates +* *Stream*: Each stream includes: ** *Packet*: Packet template up to 9K bytes -** *Field Engine*: which field to change, do we want to change packet size -** *Mode*: how to send the packet. Continues/Burst/Multi Burst -** *Rx Stats* Which Statstistic to collect for each stream +** *Field Engine*: which field to change, do we want to change the packet size +** *Mode*: How to send the packet. 
Continuous/Burst/Multi Burst +** *Rx Stats*: Which Statstistic to collect for each stream ** *Rate*: Specified in Packet Per Second (pps) or bandwidth (bps) ** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continues mode @@ -147,7 +147,7 @@ image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_ | / | t-rex-64/dpdk_set_ports/stl-sim | /stl | Stateless native (py) profiles | /stl/yaml | Stateless YAML profiles -| /stl/htl | Stateless HTL profiles +| /stl/hlt | Stateless HLT profiles | /ko | Kernel modules for DPDK | /external_libs | Python external libs used by server/clients | /exp | Golden pcap file for unit-tests @@ -168,11 +168,11 @@ This tutorial will walk you through basic but complete TRex Stateless use cases ==== Tutorial: Simple IPv4/UDP packet - TRex -*Goal*:: send a simple UDP packet from all the ports +*Goal*:: Send a simple UDP packet from all the ports *Traffic profile*:: -Traffic profile (TP) is a way to define *how* to generate the traffic. It defines the traffic templates the rate the mode and which fields in the packet to change. The following example defines a profile with one stream. The stream is with IP/UDP packet template with 10 bytes of 'x'(0x78) of payload. to get more example how to define packets using scapy see here link:http://www.secdev.org/projects/scapy/doc/[Scapy] +Traffic profile (TP) is a way to define *how* to generate the traffic. It defines the traffic templates for the rate, the mode and which fields in the packet to change. The following example defines a profile with one stream. The stream is with IP/UDP packet template with 10 bytes of 'x'(0x78) of payload. 
to get more example how to define packets using scapy see here link:http://www.secdev.org/projects/scapy/doc/[Scapy] *file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] @@ -198,7 +198,7 @@ class STLS1(object): return [ self.create_stream() ] -# dynamic load - used for trex console or simulator +# dynamic load - used for TRex console or simulator def register(): <4> return STLS1() ---- @@ -207,12 +207,17 @@ def register(): <3> get_streams function is mandatory <4> Each Traffic profile module should have a `register` function +[NOTE] +===================================================================== +The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination +===================================================================== + *Start TRex as a server*:: [NOTE] ===================================================================== -There is no need to install any python packages (including scapy). just download the TRex package +There is no need to install any python packages (including scapy). The TRex package includes all the packages it requires ===================================================================== @@ -346,18 +351,13 @@ Port Statistics dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, ---- -[NOTE] -===================================================================== -The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. 
if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination -===================================================================== - ==== Tutorial: Connect from a remote server *Goal*:: Console connect from a remote machine to TRex server *Check that TRex server is up*:: -Make sure TRex server is running, if not run trex in interactive mode +Make sure TRex server is running, if not run TRex in interactive mode [source,bash] ---- @@ -395,12 +395,12 @@ Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is ==== Tutorial: Source and Destination MAC address -*Goal*:: Change source/destination MAC addrees +*Goal*:: Change source/destination MAC address -Each TRex port has a source and destination MAC (DUT) configured in /etc/trex_cfg.yaml. +Each TRex port has a source and destination MAC (DUT) configured in /etc/trex_cfg.yaml. The source MAC is not necessarily the hardware MAC address configured in eeprom. By default those MAC (source and destination) is taken. -In case a user configures a source or destination MAC explicitly this MAC will take precedence +In case a user configures a source or destination MAC explicitly this MAC will take precedence. .MAC addrees @@ -424,11 +424,11 @@ For example IP(src="16.0.0.1",dst="48.0.0.1")/ UDP(dport=12,sport=1025) ---- -<1> Don't take TRex port src interface MAC instead replace it with 00:bb:12:34:56:01 +<1> Don't use TRex port src interface MAC. Instead replace it with 00:bb:12:34:56:01 [IMPORTANT] ===================================== -TRex ports will receive a packet only when the packet will have a destination MAC of port defined in the `/etc/trex_cfg.yaml`. To configure the port to be promiscuous and get all the packets on the line you can configure it from API or from Console with `portattr -a --prom` +A TRex port will receive a packet only if the packet has a destination MAC matching the HW Src mac defined for that port in the `/etc/trex_cfg.yaml`. 
A port can be put into promiscuous mode, allowing receipt of all the packets on the line, by configure it through the API or at the Console with `portattr -a --prom`. ===================================== To show the port mode @@ -462,9 +462,9 @@ NUMA Node | 0 | 0 | Python API examples are located here: `automation/trex_control_plane/stl/examples`. -Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib` +The Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib`. -The Console is using the python API library to interact with TRex server and the protocol is JSON-RPC2 over ZMQ +The TRex Console uses the python API library to interact with the TRex server using the JSON-RPC2 protocol over ZMQ. *file*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] @@ -594,13 +594,13 @@ def simple_burst (): # run the tests simple_burst() ---- -<1> import the stl_path. you should *fix* the path to point to your stl_trex library path -<2> import trex Stateless library. path should be fixed -<3> create packet per direction using Scapy -<4> This is something more advanced will be explained later -<5> Connect to local TRex username , server can be added -<6> Acquire the ports -<7> load the profile and start the traffic +<1> Import the stl_path. You should *fix* the path to point to your stl_trex library path. +<2> Import TRex Stateless library. The path should be fixed. +<3> Create packet per direction using Scapy. +<4> This is something more advanced will be explained later. +<5> Connect to local TRex. Username and server can be added. +<6> Acquire the ports. +<7> Load the profile and start the traffic <8> Wait for the traffic to, be finished. 
There is a polling function so you can test do something while waiting <9> Get port statistics <10> Disconnect @@ -791,7 +791,7 @@ class STLS1(object): return [ self.create_stream() ] -# dynamic load - used for trex console or simulator +# dynamic load - used for TRex console or simulator def register(): <3> return STLS1() ---- -- cgit 1.2.3-korg From d431b9ab93a13ff3d46ee035b32ebb2ecc4d154d Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 10 Mar 2016 21:43:44 +0200 Subject: v1.95 --- release_notes.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 60a4fe63..d004d066 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,18 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.95 == + +* TUI support per stream stats (press s to get to this window) +* per stream statistic API examples +* Add Python API automatic documentation scripts +* Fix issue with a packet smaller than 64 bytes + +=== Known issue + +* High speed of start/stop of per stream stats give zero in statistics + + == Release 1.94 == * Fix Python API stop/sync issue. Now TX counters are synced in case of stop API -- cgit 1.2.3-korg From e7c82f8b7d4717edd31b16f7c9b34ddf74cd3eec Mon Sep 17 00:00:00 2001 From: beubanks Date: Thu, 10 Mar 2016 16:28:20 -0500 Subject: fix TRex package folders table markdown --- draft_trex_stateless.asciidoc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 32126b8c..784c497e 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -184,7 +184,7 @@ If you are confused you probably need Stateless. 
| /automation/trex_control_plane/stl | Stateless lib and Console | /automation/trex_control_plane/stl/trex_stl_lib | Stateless lib | /automation/trex_control_plane/stl/examples | Stateless Examples -|================= +|============================= === Basic Tutorials @@ -625,16 +625,16 @@ simple_burst() <5> Connect to local TRex. Username and server can be added. <6> Acquire the ports. <7> Load the profile and start the traffic -<8> Wait for the traffic to, be finished. There is a polling function so you can test do something while waiting +<8> Wait for the traffic to be finished. There is a polling function so you can test do something while waiting <9> Get port statistics <10> Disconnect -==== Tutorials HLT Python API +==== Tutorial: HLT Python API -HLT Python API is a layer on top the native layer. It supports the standard Cisco traffic generator API -See more in Cisco/IXIA/Spirent documentation -TRex supported a limited number of HLTAPI arguments and the recommendation is to use the native API due to the flexibility and simplicity. +HLT Python API is a layer on top of the native layer. It supports the standard Cisco traffic generator API. +See more in Cisco/IXIA/Spirent documentation. +TRex supported a limited number of HLTAPI arguments and the recommendation is to use the native API due to the flexibility and simplicity. IXIA for example, has a book of ~2000 pages for specifying all the HLTAPI mode of operations. One of the reasons for the 2000 pages is that in the API there is no clear separation between the definition of the template packet, and the fields that need to be changed and the mode of transmission. This creates a bloat of arguments that need to be documented. The supported classs are: @@ -777,7 +777,7 @@ if __name__ == "__main__": *Goal*:: Demonstrates the most basic use case using TRex simulator -The simulator is a tool called `stil-sim` that is part of the TRex package. 
+The simulator is a tool called `stl-sim` that is part of the TRex package. It is a python script that calls an executable. The executable should run on the same machine that TRex image run (it won't run on an older Linux distributions). -- cgit 1.2.3-korg From 5691a5705864f95e6cf7697bfa19637327d7e03f Mon Sep 17 00:00:00 2001 From: beubanks Date: Thu, 10 Mar 2016 18:43:18 -0500 Subject: more doc cleanup --- draft_trex_stateless.asciidoc | 46 +++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 784c497e..38c8ab12 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -139,7 +139,7 @@ image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_ ** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continuous mode -=== Statful vs Stateless +=== Stateful vs Stateless TRex Stateless support is basic L2/L3 tests more for Switch/Router. With Stateless it is possible to define a Stream that has a *one* packet template, define a program to change any fields in the packet and run it in continues/burst/multi-burst mode. @@ -150,7 +150,7 @@ a->b->c->d-> back to a And create a program for each stream to change src_ip=10. 0.0.1-10.0.0.254 this will create something similar to Stateful but the underline is totally different. If you are confused you probably need Stateless. 
-.Statful vs Stateless +.Stateful vs Stateless [cols="1^,3^,3^", options="header"] |================= | Feature | Stateless |Statful @@ -823,7 +823,7 @@ def register(): <2> Mode is Continuous with rate of 1 PPS (default rate is 1 PPS) <3> Each Traffic profile module should have a `register` function -Now let try to run it throw TRex simulator limiting the number of packets to 10 +Now let's try to run it through the TRex simulator while limiting the number of packets to 10 [source,bash] ---- @@ -1046,15 +1046,15 @@ def register(): *Discussion*:: -The following are the main traffic profiles formats. The native is the preferred one. There is a separation between how the traffic is defined and how to control/activate it. The API/Console/GUI can load a traffic profile and start/stop/get a statistic. Due to this separation it is possible to share traffic profiles. +The following are the main traffic profile formats. Native is the preferred format. There is a separation between how the traffic is defined and how to control/activate it. The API/Console/GUI can load a traffic profile and start/stop/get a statistic. Due to this separation it is possible to share traffic profiles. .Traffic profiles formats [cols="1^,1^,10<", options="header",width="80%"] |================= | Profile Type | Format | Description -| Native | Python | A native Python like. Have the most flexibility. any format can be converted to native using `stl-sim` using --native option -| HLT | Python | HLT arguments like -| YAML | YAML | It is the common denominator traffic profile. We suggest not to use it by human as it is not possible to compose packet using scapy. it is used to move profile between GUI and Console or API. It can be converted to native using the stl-sim using --native switch +| Native | Python | Has the most flexibility. 
Any format can be converted to native using `stl-sim` using --native option +| HLT | Python | Uses HLT arguments +| YAML | YAML | It is the common denominator traffic profile. We suggest not to use it by human as it is not possible to compose packet using scapy. it is used to move a profile between GUI and Console or API. It can be converted to native using the stl-sim using --native switch |================= @@ -1062,7 +1062,7 @@ The following are the main traffic profiles formats. The native is the preferred ==== Tutorial: Simple Interleave streams -*Goal*:: Demonstrate number of interleave streams +*Goal*:: Demonstrate interleaving of multiple streams The following example demonstrates 3 streams with different rates (pps=10,20,40) and different start time ISG (0,25msec,50msec) @@ -1132,13 +1132,13 @@ trex>start -f stl/simple_3pkt.py -m 10mbps -a ==== Tutorial: Multi burst streams - action next stream -*Goal*:: profile with stream that trigger a stream +*Goal*:: Create a profile with a stream that trigger another stream -The following example demonstrates +The following example demonstrates: 1. More than one stream 2. Burst of 10 packets -3. Stream activate a Stream (self_start=False) +3. One Stream activates another Stream (self_start=False) *file*:: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] @@ -1175,24 +1175,24 @@ The following example demonstrates ]).get_streams() ---- -<1> Stream S0 is with self_start=True start after 10 sec -<2> S1 with self_start=False. S0 activate it -<3> S2 is activate by S1 +<1> Stream S0 is with self_start=True, start after 10 sec +<2> S1 with self_start=False. S0 activates it +<3> S2 is activated by S1 To run the simulator run this command [source,bash] ---- -$ ./stl-sim -f stl/stl/burst_3pkt_600pkt.py -o b.pcap +$ ./stl-sim -f stl/stl/burst_3pkt_60pkt.py -o b.pcap ---- -The pcap file should have 60 packets. The first 10 packets has src_ip=16.0.0.1. The next 20 packets has src_ip=16.0.0.2. 
The next 30 packets has src_ip=16.0.0.3 +The pcap file should have 60 packets. The first 10 packets have src_ip=16.0.0.1. The next 20 packets has src_ip=16.0.0.2. The next 30 packets has src_ip=16.0.0.3 This profile can be run from Console using this command [source,bash] ---- -TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 +TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 ---- ==== Tutorial: Multi Burst mode @@ -1213,13 +1213,13 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 pad = max(0, size - len(base_pkt)) * 'x' - return STLProfile( [ STLStream( isg = 10.0, # star in delay <1> + return STLProfile( [ STLStream( isg = 10.0, # start in delay <1> name ='S0', packet = STLPktBuilder(pkt = base_pkt/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 10), next = 'S1'), # point to next stream - STLStream( self_start = False, # stream is disabled enable trow S0 <2> + STLStream( self_start = False, # stream is disabled. Enabled by S0 <2> name ='S1', packet = STLPktBuilder(pkt = base_pkt1/pad), mode = STLTXMultiBurst( pps = 1000, @@ -1231,8 +1231,8 @@ TRex>start -f stl/stl/burst_3pkt_600pkt.py --port 0 ]).get_streams() ---- -<1> Stream S0 wait 10 usec(isg) and send burst of 10 packet in 10 PPS rate -<2> Multi burst of 5 bursts of 4 packets with a inter burst gap of one second +<1> Stream S0 will wait 10 usec(isg) and then send a burst of 10 packet at 10 PPS rate +<2> Multi burst of 5 bursts of 4 packets with an inter burst gap of one second image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width}, link="images/stl_tut_4.png"] @@ -1262,13 +1262,13 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} mode = STLTXSingleBurst( pps = 10, total_pkts = 1), next = 'S1'), # point to next stream - STLStream( self_start = False, # stream is disabled enable trow S0 + STLStream( self_start = False, # stream is disabled. 
Enabled by S0 name ='S1', packet = STLPktBuilder(pkt = base_pkt1/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 2), next = 'S2' ), - STLStream( self_start = False, # stream is disabled enable trow S0 + STLStream( self_start = False, # stream is disabled. Enabled by S1 name ='S2', packet = STLPktBuilder(pkt = base_pkt2/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 3 ), -- cgit 1.2.3-korg From 1e4e12773cd7ff0674ff8bce9444b7b63bd42677 Mon Sep 17 00:00:00 2001 From: beubanks Date: Fri, 11 Mar 2016 16:34:28 -0500 Subject: more document edit/cleanup --- draft_trex_stateless.asciidoc | 86 +++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 38c8ab12..e3ddada1 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -685,7 +685,7 @@ if __name__ == "__main__": """, description="Example for TRex HLTAPI", - epilog=" based on hhaim's stl_run_udp_simple example"); + epilog=" based on hhaim's stl_run_udp_simple example") parser.add_argument("--ip", dest="ip", @@ -720,7 +720,7 @@ if __name__ == "__main__": help='dst MAC', default='00:50:56:b9:34:f3') - args = parser.parse_args(); + args = parser.parse_args() hltapi = CTRexHltApi() print 'Connecting to TRex' @@ -1073,7 +1073,7 @@ The following example demonstrates 3 streams with different rates (pps=10,20,40) def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) <1> base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) @@ -1148,7 +1148,7 @@ The following example demonstrates: def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS 
base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) @@ -1207,7 +1207,7 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' @@ -1249,20 +1249,20 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt1 = Ether()/IP(src="16.0.0.2",dst="48.0.0.1")/UDP(dport=12,sport=1025) base_pkt2 = Ether()/IP(src="16.0.0.3",dst="48.0.0.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' - return STLProfile( [ STLStream( isg = 10.0, # star in delay + return STLProfile( [ STLStream( isg = 10.0, # start in delay name ='S0', packet = STLPktBuilder(pkt = base_pkt/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 1), next = 'S1'), # point to next stream - STLStream( self_start = False, # stream is disabled. Enabled by S0 + STLStream( self_start = False, # stream is disabled. 
Enabled by S0 name ='S1', packet = STLPktBuilder(pkt = base_pkt1/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 2), @@ -1273,7 +1273,7 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} packet = STLPktBuilder(pkt = base_pkt2/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 3 ), action_count = 2, # loop 2 times <1> - next = 'S0' # back to S0 loop + next = 'S0' # loop back to S0 ) ]).get_streams() @@ -1281,11 +1281,11 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} <1> go back to S0 but limit it to 2 loops -==== Tutorial: IMIX with UDP packets directional +==== Tutorial: IMIX with UDP packets, bi-directional -*Goal* : Demonstrate how to create IMIX +*Goal* : Demonstrate how to create an IMIX traffic profile. -This profile has 3 streams each with different size packet. The rate is different for each stream/size see link:https://en.wikipedia.org/wiki/Internet_Mix[here] +This profile has 3 streams, each with different size packet. The rate is different for each stream/size. See link:https://en.wikipedia.org/wiki/Internet_Mix[here] *file*:: link:{github_stl_path}/imix.py[stl/imix.py] @@ -1298,7 +1298,7 @@ This profile has 3 streams each with different size packet. The rate is differen # default IMIX properties self.imix_table = [ {'size': 60, 'pps': 28, 'isg':0 }, - {'size': 590, 'pps': 20, 'isg':0.1 }, + {'size': 590, 'pps': 16, 'isg':0.1 }, {'size': 1514, 'pps': 4, 'isg':0.2 } ] @@ -1352,19 +1352,19 @@ This profile has 3 streams each with different size packet. The rate is differen ---- <1> Base on the direction, we will construct a diffrent stream (replace src and dest) <2> Even port id has direction==0 and odd has direction==1 -<3> We didn't explain this yet. but this is a Field Engine program to change fields inside the packets +<3> We didn't explain this yet. 
This is a Field Engine program to change fields inside the packets ==== Tutorial: Field Engine, Syn attack The following example demonstrates changing packet fields. -The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to be more flexiable in the cost of performance. -The FE can allocate stream variable in Stream context. Write a stream variable to a packet offset, change packet size etc. +The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to be more flexible at the cost of performance. +The FE can allocate stream variables in a Stream context, write a stream variable to a packet offset, change packet size, etc. *Some examples for what can be done:* * Change ipv4.tos 1-10 -* Change packet size to be random in range 64-9K -* Create range of flows (change src_ip,dest_ip,src_port,dest_port) +* Change packet size to be random in the range 64-9K +* Create range of flows (change src_ip, dest_ip, src_port, dest_port) * Update IPv4 checksum for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] @@ -1397,7 +1397,7 @@ The following example demonstrates creating SYN attack from many src to one serv STLVmFixIpv4(offset = "IP"), # fix checksum <5> STLVmWrFlowVar(fv_name="src_port", <6> - pkt_offset= "TCP.sport") # fix udp len + pkt_offset= "TCP.sport") # U ] ) @@ -1412,9 +1412,9 @@ The following example demonstrates creating SYN attack from many src to one serv <1> Create SYN packet using Scapy <2> Define stream variable name=ip_src, 4 bytes size for IPv4. <3> Define stream variable name=src_port, 2 bytes size for port. -<4> Write ip_src stream var into `IP.src` packet offset. Scapy calculate the offset. We could gave `IP:1.src" for second IP header in the packet +<4> Write ip_src stream var into `IP.src` packet offset. Scapy calculates the offset. 
We could gave `IP:1.src" for second IP header in the packet <5> Fix IPv4 checksum. here we provide the header name `IP` we could gave `IP:1` for second IP -<6> Update TCP src port- TCP checksum is not updated here +<6> Write src_port stream var into `TCP.sport` packet offset. TCP checksum is not updated here WARNING: Original Scapy does not have the capability to calculate offset for a header/field by name. This offset capability won't work for all the cases because there could be complex cases that Scapy rebuild the header. In such cases put offset as a number @@ -1436,7 +1436,7 @@ pkt,Client IPv4,Client Port ==== Tutorial: Field Engine, Tuple Generator The following example demonstrates creating multiply flows from the same packet template. -The TupleGenerator instructions are used to create two stream variables with IP, port see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] +The Tuple Generator instructions are used to create two stream variables for IP, port. See link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] *file*:: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] @@ -1456,14 +1456,14 @@ The TupleGenerator instructions are used to create two stream variables with IP, STLVmFixIpv4(offset = "IP"), STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) <3> ] - ); + ) pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm) ---- -<1> Define struct with two dependent varibles tuple.ip tuple.port -<2> Write tuple.ip to IPv4 src field offset -<3> Write tuple.port to UDP header. You should set UDP.checksum to zero +<1> Define struct with two dependent variables: tuple.ip, tuple.port +<2> Write tuple.ip variable to `IPv4.src` field offset +<3> Write tuple.port variable to `UDP.sport` field offset. You should set UDP.checksum to zero .Pcap file output @@ -1479,7 +1479,7 @@ pkt,Client IPv4,Client Port |================= * Number of clients are two. 
16.0.0.1 and 16.0.0.2 -* Number of flows is limited to 129020 (2*65535-1025) +* Number of flows is limited to 129020 (2 * (65535-1025)) * The stream variable size should match the size of the FlowVarWr instruction ==== Tutorial: Field Engine, write to a bit-field packet @@ -1543,14 +1543,14 @@ The way to do it is: def create_stream (self): # pkt - p_l2 = Ether(); + p_l2 = Ether() p_l3 = IP(src="16.0.0.1",dst="48.0.0.1") p_l4 = UDP(dport=12,sport=1025) - pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4)); + pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4)) base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size)) - l3_len_fix =-(len(p_l2)); - l4_len_fix =-(len(p_l2/p_l3)); + l3_len_fix =-(len(p_l2)) + l4_len_fix =-(len(p_l2/p_l3)) # vm @@ -1616,7 +1616,7 @@ bind_layers(VXLAN, Ether) class STLS1(object): def __init__ (self): - pass; + pass def create_stream (self): pkt = Ether()/IP()/UDP(sport=1337,dport=4789)/VXLAN(vni=42)/Ether()/IP()/('x'*20) <2> @@ -1678,13 +1678,13 @@ Then traffic can be sent from client side A->C class STLS1(object): def __init__ (self): - self.num_clients =30000; # max is 16bit + self.num_clients =30000 # max is 16bit self.fsize =64 def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS base_pkt = Ether(src="00:00:dd:dd:00:01")/ IP(src="55.55.1.1",dst="58.55.1.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' @@ -1853,7 +1853,7 @@ class STLS1(object): """ def __init__ (self): - self.max_pkt_size_l3 =9*1024; + self.max_pkt_size_l3 =9*1024 def create_stream (self): @@ -1913,7 +1913,7 @@ class STLS1(object): """ def __init__ (self): - self.max_pkt_size_l3 =9*1024; + self.max_pkt_size_l3 =9*1024 def create_stream (self): @@ -2286,14 +2286,14 @@ The STLVmWrMaskFlowVar is a handy instruction. 
The pseudocode is as follows: .Pseudocode [source,bash] ---- - uint32_t val=(cast_to_size)rd_from_varible("name"); # read flow-var - val+=m_add_value; # add value + uint32_t val=(cast_to_size)rd_from_varible("name") # read flow-var + val+=m_add_value # add value if (m_shift>0) { # shift - val=val<>(-m_shift); + val=val>>(-m_shift) } } @@ -2435,7 +2435,7 @@ def create_streams (self, direction = 0, **args): port_id = args.get('port_id') if port_id==None: - port_id=0; + port_id=0 if port_id == 0: return [STLHltStream(tcp_src_port_mode = 'decrement', @@ -2494,13 +2494,13 @@ The Console will give the port/direction and will get the right stream in each i class STLS1(object): def __init__ (self): - self.num_clients =30000; # max is 16bit <1> + self.num_clients =30000 # max is 16bit <1> self.fsize =64 def create_stream (self): # create a base packet and pad it to size - size = self.fsize - 4; # no FCS + size = self.fsize - 4 # no FCS base_pkt = Ether(src="00:00:dd:dd:00:01")/IP(src="55.55.1.1",dst="58.0.0.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' -- cgit 1.2.3-korg From eb5c189fa8c8af46c9f71a9eb0a9641135da013f Mon Sep 17 00:00:00 2001 From: beubanks Date: Sat, 12 Mar 2016 16:25:57 -0500 Subject: continuing edits --- draft_trex_stateless.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index e3ddada1..c6b8b405 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1484,8 +1484,8 @@ pkt,Client IPv4,Client Port ==== Tutorial: Field Engine, write to a bit-field packet -The following example demonstrates a way to write a stream variable to a bit field packet variables. -In this example MPLS label field will be changed. +The following example demonstrates a way to write a stream variable to a bit field packet variable. +In this example an MPLS label field will be changed. 
.MPLS header [cols="32", halign="center",width="50%"] @@ -1531,7 +1531,7 @@ In this example MPLS label field will be changed. ==== Tutorial: Field Engine, Random packet size The following example demonstrates a way to to change packet size to be a random size. -The way to do it is: +The way to do it is: 1. Define template packet with maximum size 2. Trim the packet to the size you want 3. Update the packet fields to the new size -- cgit 1.2.3-korg From df5896a8f484d67759e74faa01576994b7f77532 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 13 Mar 2016 15:07:12 +0200 Subject: minor --- draft_trex_stateless.asciidoc | 78 ++++++++++++++++++++++++++++++++++++++++--- trex_book.asciidoc | 4 +++ 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index c6b8b405..7ff203a7 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -52,6 +52,12 @@ endif::backend-xhtml11[] ** Python HLTAPI Client API * Multi user support - multiple users can interact with the same TRex simultaneously +==== Prerequisite + +This document assumes that you know what is TRex and you already installed and configured it. To read more about it see here link:trex_manual.html[manual] + +You should read up to this link:trex_manual.html#_basic_usage[basic usage] + ==== Traffic profile example image::images/stl_streams_example.png[title="Streams example",align="left",width={p_width}, link="images/stl_streams_example.png"] @@ -244,12 +250,16 @@ The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change There is no need to install any python packages (including scapy). The TRex package includes all the packages it requires ===================================================================== - [source,bash] ---- $sudo ./t-rex-64 -i ---- +* You should wait until the server is up and running. 
+* You can add `-c` for adding more cores +* You can add `--cfg` for different configuration file + + *Connect with Console*:: From the same machine in a different terminal (either open a new window using `xterm`, or `ssh` again) run the folowing command @@ -375,6 +385,65 @@ Port Statistics dashboard: 'p' - pause, 'c' - clear, '-' - low 5%, '+' - up 5%, ---- + +*Discussion*:: + +In this example TRex sends the *same* packet from all the ports. If your setup is connected with loopback you will see Tx packets from port 0 in Rx port 1 and vice versa. If however you are having DUT with static route you might see all the packets going to a specific port. + +.Static route +[source,bash] +---- +interface TenGigabitEthernet0/0/0 + mtu 9000 + ip address 1.1.9.1 255.255.255.0 +! +interface TenGigabitEthernet0/1/0 + mtu 9000 + ip address 1.1.10.1 255.255.255.0 +! + +ip route 16.0.0.0 255.0.0.0 1.1.9.2 +ip route 48.0.0.0 255.0.0.0 1.1.10.2 +---- + +In this example all the packets will be routed to port `TenGigabitEthernet0/1/0` + +To solve this there is a way to use direction flag in the script + +*file*:: link:{github_stl_path}/udp_1pkt_simple_bdir.py[stl/udp_1pkt_simple_bdir.py] + +[source,python] +---- + + class STLS1(object): + + def create_stream (self): + return STLStream( + packet = + STLPktBuilder( + pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025)/(10*'x') + ), + mode = STLTXCont()) + + def get_streams (self, direction = 0): + # create 1 stream + if direction==0: <1> + src_ip="16.0.0.1" + dst_ip="48.0.0.1" + else: + src_ip="48.0.0.1" + dst_ip="16.0.0.1" + + pkt = STLPktBuilder( + pkt = Ether()/IP(src=src_ip,dst=dst_ip)/ + UDP(dport=12,sport=1025)/(10*'x') ) + + return [ STLStream( packet = pkt,mode = STLTXCont()) ] +---- +<1> Usage of direction. 
The packet will be different for each direction + + ==== Tutorial: Connect from a remote server *Goal*:: Console connect from a remote machine to TRex server @@ -415,6 +484,7 @@ extern PYTHON=/bin/mypython #bash [NOTE] ===================================================================== Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is supported. Python 3.0 support in WIP +You should have the same tree of source code in the client side. We are working on a zip file that include only the client python/so files ===================================================================== ==== Tutorial: Source and Destination MAC address @@ -1108,7 +1178,7 @@ The folowing figure present the output image::images/stl_inter.png[title="Interleave streams",align="left",width={p_width}, link="images/stl_inter.png"] -Discussion:: +*Discussion*:: 1. stream #1 schedule a packet each 100msec 2. stream #2 schedule a packet each 50msec @@ -2286,8 +2356,8 @@ The STLVmWrMaskFlowVar is a handy instruction. 
The pseudocode is as follows: .Pseudocode [source,bash] ---- - uint32_t val=(cast_to_size)rd_from_varible("name") # read flow-var - val+=m_add_value # add value + uint32_t val=(cast_to_size)rd_from_variable("name") # read flow-var + val+=m_add_value # add value if (m_shift>0) { # shift val=val< Date: Sun, 13 Mar 2016 15:25:07 +0200 Subject: add cp_stl build --- draft_trex_stateless.asciidoc | 6 ++++++ wscript | 50 +++++++++++++++++++++++++++---------------- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 7ff203a7..4c0b5784 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -299,6 +299,12 @@ Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] <5> Stop on all the ports +[NOTE] +===================================================================== +In case you have a connection *error* look into /etc/trex_cfg.yaml +you should *remove* keywords like `enable_zmq_pub : true` and `zmq_pub_port : 4501` from the file. 
+===================================================================== + To look into the streams using `streams -a` .Streams diff --git a/wscript b/wscript index c7823d1c..d94be256 100755 --- a/wscript +++ b/wscript @@ -120,7 +120,6 @@ def do_visio(bld): for x in bld.path.ant_glob('visio\\*.vsd'): tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) -#def build_cp_docs (trex_src_dir, dest_dir = "_build", builder = "html"): def build_cp_docs (task): out_dir = task.outputs[0].abspath() export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') @@ -137,6 +136,36 @@ def build_cp_docs (task): return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) return (1) +def build_stl_cp_docs (task): + out_dir = task.outputs[0].abspath() + export_path = os.path.join(os.getcwd(), 'build', 'cp_stl_docs') + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. + trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) + ) + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) + return (1) + + + +def build_cp(bld,dir,root,callback): + export_path = os.path.join(os.getcwd(), 'build', dir) + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. 
+ trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', root, 'index.rst') + bld(rule=build_cp_docs,target = dir) + else: + raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") + + def build(bld): bld(rule=my_copy, target='symbols.lang') @@ -218,23 +247,8 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_console.asciidoc waf.css', target='trex_console.html', scan=ascii_doc_scan) -# bld(rule=build_cp_docs, -# source='1.txt', target='cp_docs', scan=ascii_doc_scan) - - # generate control plane documentation - export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. 
- trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc', 'index.rst') - bld(rule=build_cp_docs, -# source = '1.txt',#trex_core_docs_path, - target = 'cp_docs') - # build_cp_docs(trex_core_git_path, dest_dir= export_path) - else: - raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") - + build_cp(bld,'cp_docs','doc',build_cp_docs) + build_cp(bld,'cp_stl_docs','doc_stl',build_stl_cp_docs) class Env(object): @staticmethod -- cgit 1.2.3-korg From 716bb9616aac1b7f64e3f5ea7aeeb1294f0eef6f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 14 Mar 2016 17:43:04 +0200 Subject: update doc to build stl_api --- draft_trex_stateless.asciidoc | 1 - trex_book.asciidoc | 16 ++- ws_main.py | 327 ++++++++++++++++++++++++++++++++++++++++++ wscript | 318 ++++------------------------------------ 4 files changed, 366 insertions(+), 296 deletions(-) create mode 100644 ws_main.py diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 4c0b5784..976ac2fb 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -3321,7 +3321,6 @@ traffic_config_kwargs = { 'udp_dst_port_step': 1, 'udp_dst_port_count': 1, } - ---- diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 731f62f0..8219c09a 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -113,6 +113,20 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox |================= + +.XL710 QSFP+ support +[options="header",cols="1,1",width="70%"] +|================= +| QSFP+ | Example +| QSFP+ SR4 optics | Cisco QSFP-40G-SR4-S link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ LR-4 Optics | Cisco QSFP-40G-LR4-S link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP Active Optical 
Cables (AoC) | QSFP-H40G-AOC link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ Intel Ethernet Modular Optics | +| QSFP+ DA twin-ax cables | +| Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +|================= + + [IMPORTANT] ===================================== * For VMXNET3 use Ubuntu and *not* Fedora 18. Fedora 18 will crash. @@ -131,8 +145,6 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc - - ===================================== .Sample order for UCSC-C220-M3S with 4x10Gb ports diff --git a/ws_main.py b/ws_main.py new file mode 100644 index 00000000..06e45c48 --- /dev/null +++ b/ws_main.py @@ -0,0 +1,327 @@ +#! /usr/bin/env python +# encoding: utf-8 +# hhaim, 2014 (IL) base on WAF book + +""" +call 'waf --targets=waf.pdf' or use 'waf list' to see the targets available +""" + +VERSION='0.0.1' +APPNAME='wafdocs' + +import os, re, shutil +import shlex +import subprocess + + +top = '.' 
+out = 'build' + +re_xi = re.compile('''^(include|image)::([^.]*.(asciidoc|\\{PIC\\}))\[''', re.M) +def ascii_doc_scan(self): + p = self.inputs[0].parent + node_lst = [self.inputs[0]] + seen = [] + depnodes = [] + while node_lst: + nd = node_lst.pop(0) + if nd in seen: continue + seen.append(nd) + + code = nd.read() + for m in re_xi.finditer(code): + name = m.group(2) + if m.group(3) == '{PIC}': + + ext = '.eps' + if self.generator.rule.rfind('A2X') > 0: + ext = '.png' + + k = p.find_resource(name.replace('{PIC}', ext)) + if k: + depnodes.append(k) + else: + k = p.find_resource(name) + if k: + depnodes.append(k) + node_lst.append(k) + return [depnodes, ()] + + + +import re +def scansize(self): + name = 'image::%s\\{PIC\\}\\[.*,(width|height)=(\\d+)' % self.inputs[0].name[:-4] + re_src = re.compile(name) + lst = self.inputs[0].parent.get_src().ant_glob('*.txt') + for x in lst: + m = re_src.search(x.read()) + if m: + val = str(int(1.6 * int(m.group(2)))) + if m.group(1) == 'width': + w = val + h = "800" + else: + w = "800" + h = val + + ext = self.inputs[0].name[-3:] + if ext == 'eps': + code = '-geometry %sx%s' % (w, h) + elif ext == 'dia': + if m.group(1) == 'width': + h = '' + else: + w = '' + code = '--size %sx%s' % (w, h) + else: + code = '-Gsize="%s,%s"' % (w, h) + break + else: + return ([], '') + + return ([], code) + +def options(opt): + opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled') + +def configure(conf): + conf.find_program('asciidoc', path_list='/usr/bin/', var='ASCIIDOC') + conf.find_program('sphinx-build', path_list='/usr/local/bin/', var='SPHINX') + pass; + +def convert_to_pdf(task): + input_file = task.outputs[0].abspath() + out_dir = task.outputs[0].parent.get_bld().abspath() + os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) + return (0) + +def convert_to_pdf_book(task): + input_file = task.outputs[0].abspath() + out_dir = 
task.outputs[0].parent.get_bld().abspath() + os.system('a2x --no-xmllint -v -f pdf -d book %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) + return (0) + + +def ensure_dir(f): + if not os.path.exists(f): + os.makedirs(f) + + +def my_copy(task): + input_file=task.outputs[0].abspath() + out_dir=task.outputs[0].parent.get_bld().abspath() + ensure_dir(out_dir) + shutil.copy2(input_file, out_dir+ os.sep+task.outputs[0].name) + return (0) + + +def do_visio(bld): + for x in bld.path.ant_glob('visio\\*.vsd'): + tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) + +def build_cp_docs (task): + out_dir = task.outputs[0].abspath() + export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. + trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) + ) + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) + return (1) + +def build_stl_cp_docs (task): + out_dir = task.outputs[0].abspath() + export_path = os.path.join(os.getcwd(), 'build', 'cp_stl_docs') + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. 
+ trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) + ) + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) + return (1) + + + +def build_cp(bld,dir,root,callback): + export_path = os.path.join(os.getcwd(), 'build', dir) + trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") + if not os.path.isdir(trex_core_git_path): + trex_core_git_path = os.getenv('TREX_CORE_GIT', None) + if trex_core_git_path: # there exists a default directory or the desired ENV variable. + trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', root, 'index.rst') + bld(rule=callback,target = dir) + else: + raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") + + + +def build(bld): + bld(rule=my_copy, target='symbols.lang') + + for x in bld.path.ant_glob('images\\**\**.png'): + bld(rule=my_copy, target=x) + bld.add_group() + + + for x in bld.path.ant_glob('yaml\\**\**.yaml'): + bld(rule=my_copy, target=x) + bld.add_group() + + + for x in bld.path.ant_glob('video\\**\**.mp4'): + bld(rule=my_copy, target=x) + bld.add_group() + + + for x in bld.path.ant_glob('images\\**\**.jpg'): + bld(rule=my_copy, target=x) + bld.add_group() + + bld(rule=my_copy, target='my_chart.js') + + bld.add_group() # separator, the documents may require any of the pictures from above + + bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', + source='trex_config.asciidoc ', target='trex_config_guide.html', scan=ascii_doc_scan) + + + bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', + source='trex_preso.asciidoc ', target='trex_preso.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + 
source='release_notes.asciidoc waf.css', target='release_notes.html', scan=ascii_doc_scan) + + + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless1.html', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book,source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book,source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.pdf', scan=ascii_doc_scan) + + + bld(rule=convert_to_pdf_book,source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.pdf', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book,source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.pdf', scan=ascii_doc_scan) + + bld(rule=convert_to_pdf_book, source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.pdf', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='packet_builder_yaml.asciidoc waf.css', target='packet_builder_yaml.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a 
stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', + source='trex_console.asciidoc waf.css', target='trex_console.html', scan=ascii_doc_scan) + + build_cp(bld,'cp_docs','doc',build_cp_docs) + + build_cp(bld,'cp_stl_docs','doc_stl',build_stl_cp_docs) + + +class Env(object): + @staticmethod + def get_env(name) : + s= os.environ.get(name); + if s == None: + print "You should define $",name + raise Exception("Env error"); + return (s); + + @staticmethod + def get_release_path () : + s= Env().get_env('TREX_LOCAL_PUBLISH_PATH'); + s +=get_build_num ()+"/" + return s; + + @staticmethod + def get_remote_release_path () : + s= Env().get_env('TREX_REMOTE_PUBLISH_PATH'); + return s; + + @staticmethod + def get_local_web_server () : + s= Env().get_env('TREX_WEB_SERVER'); + return s; + + # extral web + @staticmethod + def get_trex_ex_web_key() : + s= Env().get_env('TREX_EX_WEB_KEY'); + return s; + + @staticmethod + def get_trex_ex_web_path() : + s= Env().get_env('TREX_EX_WEB_PATH'); + return s; + + @staticmethod + def get_trex_ex_web_user() : + s= Env().get_env('TREX_EX_WEB_USER'); + return s; + + @staticmethod + def get_trex_ex_web_srv() : + s= Env().get_env('TREX_EX_WEB_SRV'); + return s; + + 
@staticmethod + def get_trex_core() : + s= Env().get_env('TREX_CORE_GIT'); + return s; + + + +def release(bld): + # copy all the files to our web server + core_dir = Env().get_trex_core() + release_dir = core_dir +"/scripts/doc/"; + os.system('mkdir -p '+release_dir) + os.system('cp -rv build/release_notes.* '+ release_dir) + + +def publish(bld): + # copy all the files to our web server + remote_dir = "%s:%s" % ( Env().get_local_web_server(), Env().get_remote_release_path ()+'../doc/') + os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) + + +def publish_ext(bld): + from_ = 'build/' + os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) + + + + + + + + diff --git a/wscript b/wscript index d94be256..2359786f 100755 --- a/wscript +++ b/wscript @@ -9,322 +9,54 @@ call 'waf --targets=waf.pdf' or use 'waf list' to see the targets available VERSION='0.0.1' APPNAME='wafdocs' -import os, re, shutil -import shlex -import subprocess +import ws_main top = '.' 
out = 'build' -re_xi = re.compile('''^(include|image)::([^.]*.(asciidoc|\\{PIC\\}))\[''', re.M) -def ascii_doc_scan(self): - p = self.inputs[0].parent - node_lst = [self.inputs[0]] - seen = [] - depnodes = [] - while node_lst: - nd = node_lst.pop(0) - if nd in seen: continue - seen.append(nd) - - code = nd.read() - for m in re_xi.finditer(code): - name = m.group(2) - if m.group(3) == '{PIC}': - - ext = '.eps' - if self.generator.rule.rfind('A2X') > 0: - ext = '.png' - - k = p.find_resource(name.replace('{PIC}', ext)) - if k: - depnodes.append(k) - else: - k = p.find_resource(name) - if k: - depnodes.append(k) - node_lst.append(k) - return [depnodes, ()] - - - -import re -def scansize(self): - name = 'image::%s\\{PIC\\}\\[.*,(width|height)=(\\d+)' % self.inputs[0].name[:-4] - re_src = re.compile(name) - lst = self.inputs[0].parent.get_src().ant_glob('*.txt') - for x in lst: - m = re_src.search(x.read()) - if m: - val = str(int(1.6 * int(m.group(2)))) - if m.group(1) == 'width': - w = val - h = "800" - else: - w = "800" - h = val - - ext = self.inputs[0].name[-3:] - if ext == 'eps': - code = '-geometry %sx%s' % (w, h) - elif ext == 'dia': - if m.group(1) == 'width': - h = '' - else: - w = '' - code = '--size %sx%s' % (w, h) - else: - code = '-Gsize="%s,%s"' % (w, h) - break - else: - return ([], '') - - return ([], code) def options(opt): - opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled') - -def configure(conf): - conf.find_program('asciidoc', path_list='/usr/bin/', var='ASCIIDOC') - conf.find_program('sphinx-build', path_list='/usr/local/bin/', var='SPHINX') - pass; - -def convert_to_pdf(task): - input_file = task.outputs[0].abspath() - out_dir = task.outputs[0].parent.get_bld().abspath() - os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) - return (0) - -def convert_to_pdf_book(task): - input_file = task.outputs[0].abspath() - out_dir = 
task.outputs[0].parent.get_bld().abspath() - os.system('a2x --no-xmllint -v -f pdf -d book %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) - return (0) - - -def ensure_dir(f): - if not os.path.exists(f): - os.makedirs(f) - - -def my_copy(task): - input_file=task.outputs[0].abspath() - out_dir=task.outputs[0].parent.get_bld().abspath() - ensure_dir(out_dir) - shutil.copy2(input_file, out_dir+ os.sep+task.outputs[0].name) - return (0) - - -def do_visio(bld): - for x in bld.path.ant_glob('visio\\*.vsd'): - tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', source=x, target=x.change_ext('.png')) + ws_main.options(opt) -def build_cp_docs (task): - out_dir = task.outputs[0].abspath() - export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. - trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( - bld= "html", - src= ".", - dst= out_dir) - ) - return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) - return (1) - -def build_stl_cp_docs (task): - out_dir = task.outputs[0].abspath() - export_path = os.path.join(os.getcwd(), 'build', 'cp_stl_docs') - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. 
- trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( - bld= "html", - src= ".", - dst= out_dir) - ) - return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) - return (1) - - - -def build_cp(bld,dir,root,callback): - export_path = os.path.join(os.getcwd(), 'build', dir) - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. - trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', root, 'index.rst') - bld(rule=build_cp_docs,target = dir) - else: - raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") +def configure(conf): + ws_main.configure(conf) def build(bld): - bld(rule=my_copy, target='symbols.lang') - - for x in bld.path.ant_glob('images\\**\**.png'): - bld(rule=my_copy, target=x) - bld.add_group() - - - for x in bld.path.ant_glob('yaml\\**\**.yaml'): - bld(rule=my_copy, target=x) - bld.add_group() - - - for x in bld.path.ant_glob('video\\**\**.mp4'): - bld(rule=my_copy, target=x) - bld.add_group() - - - for x in bld.path.ant_glob('images\\**\**.jpg'): - bld(rule=my_copy, target=x) - bld.add_group() - - bld(rule=my_copy, target='my_chart.js') - - bld.add_group() # separator, the documents may require any of the pictures from above + ws_main.build(bld) - bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', - source='trex_config.asciidoc ', target='trex_config_guide.html', scan=ascii_doc_scan) - - - bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', - source='trex_preso.asciidoc ', target='trex_preso.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a 
icons=true -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='release_notes.asciidoc waf.css', target='release_notes.html', scan=ascii_doc_scan) - - - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless1.html', scan=ascii_doc_scan) - - bld(rule=convert_to_pdf_book, - source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) - - bld(rule=convert_to_pdf_book, - source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.pdf', scan=ascii_doc_scan) - - - bld(rule=convert_to_pdf_book, - source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.pdf', scan=ascii_doc_scan) - - bld(rule=convert_to_pdf_book, - source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.pdf', scan=ascii_doc_scan) - - bld(rule=convert_to_pdf_book, - source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.pdf', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='packet_builder_yaml.asciidoc waf.css', 
target='packet_builder_yaml.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_control_plane_peek.asciidoc waf.css', target='trex_control_plane_peek.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_console.asciidoc waf.css', target='trex_console.html', scan=ascii_doc_scan) - - build_cp(bld,'cp_docs','doc',build_cp_docs) - build_cp(bld,'cp_stl_docs','doc_stl',build_stl_cp_docs) - -class Env(object): - @staticmethod - def get_env(name) : - s= os.environ.get(name); - if s == None: - print "You should define $",name - raise Exception("Env error"); - return (s); - - @staticmethod - def get_release_path () : - s= Env().get_env('TREX_LOCAL_PUBLISH_PATH'); - s +=get_build_num ()+"/" - return s; - - @staticmethod - def get_remote_release_path () : - s= Env().get_env('TREX_REMOTE_PUBLISH_PATH'); - return s; - - @staticmethod - def get_local_web_server () : - s= Env().get_env('TREX_WEB_SERVER'); - return s; - - # extral web - @staticmethod - def get_trex_ex_web_key() : - s= Env().get_env('TREX_EX_WEB_KEY'); - return s; - - @staticmethod - def get_trex_ex_web_path() : - s= Env().get_env('TREX_EX_WEB_PATH'); - return s; - - @staticmethod - def get_trex_ex_web_user() : - s= Env().get_env('TREX_EX_WEB_USER'); - return s; - - @staticmethod - 
def get_trex_ex_web_srv() : - s= Env().get_env('TREX_EX_WEB_SRV'); - return s; - - @staticmethod - def get_trex_core() : - s= Env().get_env('TREX_CORE_GIT'); - return s; +def build_info(bld): + ws_main.build_info(bld) +def pkg(bld): + ws_main.pkg(bld) def release(bld): - # copy all the files to our web server - core_dir = Env().get_trex_core() - release_dir = core_dir +"/scripts/doc/"; - os.system('mkdir -p '+release_dir) - os.system('cp -rv build/release_notes.* '+ release_dir) - + ws_main.release(bld) def publish(bld): - # copy all the files to our web server - remote_dir = "%s:%s" % ( Env().get_local_web_server(), Env().get_remote_release_path ()+'../doc/') - os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) - + ws_main.publish(bld) def publish_ext(bld): - from_ = 'build/' - os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) - + ws_main.publish_ext(bld) + +def publish_web(bld): + ws_main.publish_web(bld) + +def sync(bld): + ws_main.sync(bld) + +def test(bld): + ws_main.test(bld) +def show(bld): + ws_main.show(bld) +def publish_both(bld): + ws_main.publish_both(bld) - - -- cgit 1.2.3-korg From db1d9b461fb4ab78f290a71ff73443dbb6e7beaa Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 14 Mar 2016 18:14:52 +0200 Subject: david comments --- draft_trex_stateless.asciidoc | 41 ++++++++++++++++++++++++++++++++--------- ws_main.py | 2 +- 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 976ac2fb..79533fec 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -80,7 +80,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. 
The fo [cols="1^,3^,3^,5^", options="header"] |================= | Feature | IXExplorer |TRex | Description -| Line rate | Yes |Almost ~15MPPS/core| +| Line rate | Yes |Almost ~14MPPS/core| | Multi stream | 255 | [green]*Unlimited* | | Packet build flexibility | Limited | [green]*Scapy- Unlimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols | Packet Field engine | limited | [green]*Unlimited* | @@ -556,6 +556,36 @@ NUMA Node | 0 | 0 | <1> Configure all the ports to be promiscuous <2> Check port promiscuous mode +To change the mode via Python API do this: + +.Python API to change to promiscuous mode +[source,python] +---- + c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR) + + c.connect() + + my_ports=[0,1] + + # prepare our ports + c.reset(ports = my_ports) + + # port info, mac-addr info, speed + print c.get_port_info(my_ports) <1> + + c.set_port_attr(my_ports, promiscuous = True) <2> +---- +<1> Get port info for all the ports +<2> Change port attribute + +See here for more info link:cp_stl_docs/api/client_code.html[Python Client API] + + +[NOTE] +===================================================================== +Interface is not promiscuous mode by default. If you change it to be True, it is better to change it back after your test. 
+===================================================================== + ==== Tutorial: Python automation *Goal*:: Simple automation test using Python from remote or local machine @@ -2763,16 +2793,9 @@ TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a more profiles and example can be found in `stl/hlt` folder - === Reference -==== Stream - -==== Packet - -==== Field Engine commands - -==== Modes +Have a look link:cp_stl_docs/index.html[Python Client API] === Console commands diff --git a/ws_main.py b/ws_main.py index 06e45c48..42e1e3c1 100644 --- a/ws_main.py +++ b/ws_main.py @@ -208,7 +208,7 @@ def build(bld): source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless1.html', scan=ascii_doc_scan) + source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.html', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book,source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 2db926912df6d5af50f7bbb295c16e6964582269 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 15 Mar 2016 10:19:24 +0200 Subject: fix old imix profile --- draft_trex_stateless.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 79533fec..bd9b5092 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -3031,11 +3031,11 @@ $streams --port 0 --streams 0 -f [source,bash] ---- -$start [--force] (port mask) [-f stl/imix.yaml] [-db ab] (duration) (multiplier) +$start [--force] (port mask) [-f stl/imix.py] [-db ab] (duration) (multiplier) stream to load: - -f stl/imix.yaml : load from local disk the streams file + -f stl/imix.py : load from local disk the 
streams file --db stream that was loaded to db force: @@ -3048,14 +3048,14 @@ examples [source,bash] ---- -$start -a -f stl/imix.yaml -m 10gb +$start -a -f stl/imix.py -m 10gb ---- start this profile on all all ports maximum bandwidth is 10gb [source,bash] ---- -$start -port 1 2 -f stl/imix.yaml -m 100 +$start -port 1 2 -f stl/imix.py -m 100 ---- start this profile on port 1,2 multiply by 100 -- cgit 1.2.3-korg From b0ec947dd1a7730d14ba8109884142c162dffabd Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 15 Mar 2016 14:35:15 +0200 Subject: add fd.io example --- draft_trex_stateless.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index bd9b5092..0dc1b8ae 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -3347,8 +3347,9 @@ traffic_config_kwargs = { ---- +==== FD.IO open source project using TRex - +link:https://gerrit.fd.io/r/gitweb?p=csit.git;a=tree;f=resources/tools/t-rex[here] -- cgit 1.2.3-korg From 518909e4c8d8ab03e54581e307c140eb8cb6d03b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 15 Mar 2016 15:54:02 +0200 Subject: add malform packet type --- draft_trex_stateless.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 0dc1b8ae..6638e319 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -31,7 +31,8 @@ endif::backend-xhtml11[] * Interface can be configured with multi traffic profiles * Traffic Profile can support multi streams. Scale to 10K streams in parallel * Each Stream -** Packet template - ability to build any packet using Scapy (e.g. MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) +** Packet template - ability to build any packet using Scapy (e.g. 
MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) +*** It is possible to build malformed packets ** Field engine program *** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 *** Ability to change the packet size (e.g. Random packet size 64-9K) -- cgit 1.2.3-korg From 811536d335259bf077e5c334eb3553d07f491004 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 16 Mar 2016 11:57:11 +0200 Subject: minor --- trex_book.asciidoc | 6 ++++++ ws_main.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 8219c09a..41c20e5c 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1692,3 +1692,9 @@ sudo ./dpdk_nic_bind.py -b <1> <2> We are planning to add MACs to `./dpdk_setup_ports.py -s` ===================================== + + +Q: TRex traffic doesn't show up on Wireshark, So I can't capture the traffic from the TRex port +A: TRex uses DPDK that take ownership on the ports. We are using a Switch with port mirroring to capture the traffic + + diff --git a/ws_main.py b/ws_main.py index 42e1e3c1..6d917e00 100644 --- a/ws_main.py +++ b/ws_main.py @@ -207,7 +207,7 @@ def build(bld): bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.html', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book,source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 48ab002cbf1786ffbf6c7df4ada65bede28830f7 Mon Sep 17 00:00:00 
2001 From: Hanoh Haim Date: Wed, 16 Mar 2016 17:16:52 +0200 Subject: rename the object --- draft_trex_stateless.asciidoc | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 6638e319..7a613390 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -1137,7 +1137,7 @@ class STLS1(object): IP(src='101.0.0.1', proto=17, dst='102.0.0.1', chksum=28605, len=46, flags=2L, ihl=5L, id=0) / UDP(dport=2001, sport=2001, len=26, chksum=1176) / Raw(load='\xde\xad\xbe\xef\x00\x01\x06\x07\x08\x09\x0a\x0b\x00\x9b\xe7\xdb\x82M')) - vm = CTRexScRaw([], split_by_field = '') + vm = STLScVmRaw([], split_by_field = '') stream = STLStream(packet = CScapyTRexPktBuilder(pkt = packet, vm = vm), name = 'udp_64B', mac_src_override_by_pkt = 0, @@ -1489,12 +1489,12 @@ The following example demonstrates creating SYN attack from many src to one serv # vm - vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", min_value="16.0.0.0", max_value="18.0.0.254", size=4, op="random"), <2> - STLVmFlowVar(name="src_port", + STLVmFlowVar(name="src_port", min_value=1025, max_value=65000, size=2, op="random"), <3> @@ -1553,7 +1553,7 @@ The Tuple Generator instructions are used to create two stream variables for IP, pad = max(0, size - len(base_pkt)) * 'x' - vm = CTRexScRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", <1> + vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", <1> ip_max="16.0.0.2", port_min=1025, port_max=65535, @@ -1614,7 +1614,7 @@ In this example an MPLS label field will be changed. 
IP(src="16.0.0.1",dst="48.0.0.1")/ UDP(dport=12,sport=1025)/('x'*20) - vm = CTRexScRaw( [ STLVmFlowVar(name="mlabel", <1> + vm = STLScVmRaw( [ STLVmFlowVar(name="mlabel", <1> min_value=1, max_value=2000, size=2, op="inc"), # 2 bytes var <2> @@ -1661,7 +1661,7 @@ The way to do it is: # vm - vm = CTRexScRaw( [ STLVmFlowVar(name="fv_rand", <1> + vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", <1> min_value=64, max_value=len(base_pkt), size=2, @@ -1796,7 +1796,7 @@ class STLS1(object): IP(src="55.55.1.1",dst="58.55.1.1")/UDP(dport=12,sport=1025) pad = max(0, size - len(base_pkt)) * 'x' - vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", min_value=1, max_value=self.num_clients, size=2, op="inc"), # 1 byte varible, range 1-10 @@ -1836,7 +1836,7 @@ Let's assume we have two transmitters DP threads # vm - vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", min_value="16.0.0.0", max_value="16.0.0.254", size=4, op="inc"), <1> @@ -1882,7 +1882,7 @@ Let's assume we have two transmitters DP threads # vm - vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", min_value="16.0.0.0", max_value="16.0.0.254", size=4, op="inc"), @@ -1966,7 +1966,7 @@ class STLS1(object): base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") - vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", <1> + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", <1> min_value="16.0.0.0", max_value="18.0.0.254", size=4, op="inc"), @@ -2026,7 +2026,7 @@ class STLS1(object): base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") - vm = CTRexScRaw( [ STLVmFlowVar(name="ip_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", min_value="16.0.0.0", max_value="18.0.0.254", size=4, op="inc"), @@ -2367,7 +2367,7 @@ The following example demonstrates creating IPv6 packet inside IPv4 packet and c src="2001:4860:0:2001::68")/ UDP(dport=12,sport=1025)/ICMPv6Unknown() - vm = CTRexScRaw( [ + 
vm = STLScVmRaw( [ # tuple gen for inner Ipv6 STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2", port_min=1025, port_max=65535, @@ -2414,7 +2414,7 @@ The STLVmWrMaskFlowVar is a handy instruction. The pseudocode is as follows: [source,python] ---- - vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", min_value=1, max_value=30, size=2, op="dec",step=1), @@ -2434,7 +2434,7 @@ This will cast stream variable with 2 byte to be 1 byte [source,python] ---- - vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", min_value=1, max_value=30, size=2, op="dec",step=1), @@ -2462,7 +2462,7 @@ The output will be shift by 8 [source,python] ---- - vm = CTRexScRaw( [ STLVmFlowVar(name="mac_src", + vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", min_value=1, max_value=30, size=2, @@ -2749,7 +2749,7 @@ class STLS1(object): IP(proto=17, chksum=5882, len=9202, ihl=5L, id=0) / UDP(dport=12, sport=1025, len=9182, chksum=55174) / Raw(load='!' * 9174)) - vm = CTRexScRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='inc', + vm = STLScVmRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='inc', init_value=64, min_value=64, max_value=9216, step=1), CTRexVmDescTrimPktSize(fv_name='pkt_len'), CTRexVmDescWrFlowVar(fv_name='pkt_len', @@ -2765,7 +2765,7 @@ class STLS1(object): IP(proto=17, chksum=5882, len=9202, ihl=5L, id=0) / UDP(dport=12, sport=1025, len=9182, chksum=55174) / Raw(load='!' 
* 9174)) - vm = CTRexScRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='dec', + vm = STLScVmRaw([CTRexVmDescFlowVar(name='pkt_len', size=2, op='dec', init_value=9216, min_value=64, max_value=9216, step=1), CTRexVmDescTrimPktSize(fv_name='pkt_len'), -- cgit 1.2.3-korg From f2cc2455fb70bb8333f33b064e24b09c065ff92a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 16 Mar 2016 17:30:34 +0200 Subject: fix build of draft - add toc2 --- draft_trex_stateless.asciidoc | 2 +- release_notes.asciidoc | 1 + ws_main.py | 5 ++++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 7a613390..35cf639d 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -2,7 +2,7 @@ TRex Stateless support ====================== :author: TRex team :email: trex.tgen@gmail.com -:revnumber: 2.0 +:revnumber: 1.95 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex diff --git a/release_notes.asciidoc b/release_notes.asciidoc index d004d066..70adb415 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -20,6 +20,7 @@ ifdef::backend-docbook[] endif::backend-docbook[] + == Release 1.95 == * TUI support per stream stats (press s to get to this window) diff --git a/ws_main.py b/ws_main.py index 6d917e00..90716b4a 100644 --- a/ws_main.py +++ b/ws_main.py @@ -207,9 +207,12 @@ def build(bld): bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='draft_trex_stateless.asciidoc waf.css', 
target='draft_trex_stateless.html', scan=ascii_doc_scan) + bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', + source='draft_trex_stateless_moved1.asciidoc waf.css', target='draft_trex_stateless1.html', scan=ascii_doc_scan) + bld(rule=convert_to_pdf_book,source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book,source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.pdf', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 21ce94c228a58857bbb0a725ec80cb5250ae8833 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 16 Mar 2016 20:32:12 +0200 Subject: fix breakage --- draft_trex_stateless_moved1.asciidoc | 28 ++++++++++++++++++++++++++++ release_notes.asciidoc | 9 +++++++++ trex_book.asciidoc | 9 +++++++-- 3 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 draft_trex_stateless_moved1.asciidoc diff --git a/draft_trex_stateless_moved1.asciidoc b/draft_trex_stateless_moved1.asciidoc new file mode 100644 index 00000000..b23e408a --- /dev/null +++ b/draft_trex_stateless_moved1.asciidoc @@ -0,0 +1,28 @@ +TRex Stateless support +====================== +:author: TRex team +:email: trex.tgen@gmail.com +:revnumber: 2.0 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex +:github_stl_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl +:github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples +:toclevels: 6 + +ifdef::backend-docbook[] +:p_width: 450 +:p_width_1: 200 +endif::backend-docbook[] + +ifdef::backend-xhtml11[] +:p_width: 800 +:p_width_1: 400 +endif::backend-xhtml11[] + + +moved to link:draft_trex_stateless.html[here] + + + diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 
70adb415..1a1888fa 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -21,6 +21,15 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.96 [not yet] == + +* Support pyATS/32bit/Python2.x for TCL +* Per stream statistic - Fix High speed of start/stop of giving zero in statistics +* Traffic profile direction/port directive works now see +* Add Python API documentation +* Update per stream statistic documentation + + == Release 1.95 == * TUI support per stream stats (press s to get to this window) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 41c20e5c..f5726493 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -118,14 +118,19 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch [options="header",cols="1,1",width="70%"] |================= | QSFP+ | Example -| QSFP+ SR4 optics | Cisco QSFP-40G-SR4-S link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] -| QSFP+ LR-4 Optics | Cisco QSFP-40G-LR4-S link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ SR4 optics | APPROVED OPTICS For Intel NICS, Cisco QSFP-40G-SR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ LR-4 Optics | APPROVED OPTICS For Intel NICS , Cisco QSFP-40G-LR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] | QSFP Active Optical Cables (AoC) | QSFP-H40G-AOC link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] | QSFP+ Intel Ethernet Modular Optics | | QSFP+ DA twin-ax cables | | Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU 
link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] |================= +[NOTE] +============================================== + For Intel XL710 NICS, Cisco QSFP+ won't work +============================================== + [IMPORTANT] ===================================== -- cgit 1.2.3-korg From 110e3c7abb79e8ab22c4044cbdf01c262e597056 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 17 Mar 2016 01:18:15 +0200 Subject: add HLT args table generation --- draft_trex_stateless.asciidoc | 143 +----------------------------------------- ws_main.py | 111 +++++++++++++++++++++++--------- 2 files changed, 81 insertions(+), 173 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 35cf639d..6418f55f 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -3205,148 +3205,7 @@ Ether()/IP()/TCP()/"GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n" ==== HLT supported Arguments anchor:altapi-support[] - -[source,python] ----- - -traffic_config_kwargs = { - 'mode': None, # ( create | modify | remove | reset ) - 'split_by_cores': 'split', # ( split | duplicate | single ) TRex extention: split = split traffic by cores, duplicate = duplicate traffic for all cores, single = run only with sinle core (not implemented yet) - 'consistent_random': False, # TRex extention: False (default): random sequence will be different every run, True: random sequence will be same every run - 'port_handle': None, - 'port_handle2': None, - # stream builder parameters - 'transmit_mode': 'continuous', # ( continuous | multi_burst | single_burst ) - 'rate_pps': None, - 'rate_bps': None, - 'rate_percent': 10, - 'stream_id': None, - 'name': None, - 'bidirectional': 0, - 'direction': 0, # ( 0 | 1 ) TRex extention: 1 = exchange sources and destinations - 'pkts_per_burst': 1, - 'burst_loop_count': 1, - 'inter_burst_gap': 12, - 'length_mode': 'fixed', # ( auto | 
fixed | increment | decrement | random | imix ) - 'l3_imix1_size': 60, - 'l3_imix1_ratio': 28, - 'l3_imix2_size': 590, - 'l3_imix2_ratio': 20, - 'l3_imix3_size': 1514, - 'l3_imix3_ratio': 4, - 'l3_imix4_size': 9226, - 'l3_imix4_ratio': 0, - #L2 - 'frame_size': 64, - 'frame_size_min': 64, - 'frame_size_max': 64, - 'frame_size_step': 1, - 'l2_encap': 'ethernet_ii', # ( ethernet_ii | ethernet_ii_vlan ) - 'mac_src': '00:00:01:00:00:01', - 'mac_dst': '00:00:00:00:00:00', - 'mac_src2': '00:00:01:00:00:01', - 'mac_dst2': '00:00:00:00:00:00', - 'mac_src_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'mac_src_step': 1, - 'mac_src_count': 1, - 'mac_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'mac_dst_step': 1, - 'mac_dst_count': 1, - 'mac_src2_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'mac_src2_step': 1, - 'mac_src2_count': 1, - 'mac_dst2_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'mac_dst2_step': 1, - 'mac_dst2_count': 1, - # vlan options below can have multiple values for nested Dot1Q headers - 'vlan_user_priority': 1, - 'vlan_priority_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'vlan_priority_count': 1, - 'vlan_priority_step': 1, - 'vlan_id': 0, - 'vlan_id_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'vlan_id_count': 1, - 'vlan_id_step': 1, - 'vlan_cfi': 1, - 'vlan_protocol_tag_id': None, - #L3, general - 'l3_protocol': None, # ( ipv4 | ipv6 ) - 'l3_length_min': 110, - 'l3_length_max': 238, - 'l3_length_step': 1, - #L3, IPv4 - 'ip_precedence': 0, - 'ip_tos_field': 0, - 'ip_mbz': 0, - 'ip_delay': 0, - 'ip_throughput': 0, - 'ip_reliability': 0, - 'ip_cost': 0, - 'ip_reserved': 0, - 'ip_dscp': 0, - 'ip_cu': 0, - 'l3_length': None, - 'ip_id': 0, - 'ip_fragment_offset': 0, - 'ip_ttl': 64, - 'ip_checksum': None, - 'ip_src_addr': '0.0.0.0', - 'ip_dst_addr': '192.0.0.1', - 'ip_src_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'ip_src_step': 1, # ip 
or number - 'ip_src_count': 1, - 'ip_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'ip_dst_step': 1, # ip or number - 'ip_dst_count': 1, - #L3, IPv6 - 'ipv6_traffic_class': 0, - 'ipv6_flow_label': 0, - 'ipv6_length': None, - 'ipv6_next_header': None, - 'ipv6_hop_limit': 64, - 'ipv6_src_addr': 'fe80:0:0:0:0:0:0:12', - 'ipv6_dst_addr': 'fe80:0:0:0:0:0:0:22', - 'ipv6_src_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'ipv6_src_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number - 'ipv6_src_count': 1, - 'ipv6_dst_mode': 'fixed', # ( fixed | increment | decrement | random ) - 'ipv6_dst_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number - 'ipv6_dst_count': 1, - #L4, TCP - 'l4_protocol': None, # ( tcp | udp ) - 'tcp_src_port': 1024, - 'tcp_dst_port': 80, - 'tcp_seq_num': 1, - 'tcp_ack_num': 1, - 'tcp_data_offset': 5, - 'tcp_fin_flag': 0, - 'tcp_syn_flag': 0, - 'tcp_rst_flag': 0, - 'tcp_psh_flag': 0, - 'tcp_ack_flag': 0, - 'tcp_urg_flag': 0, - 'tcp_window': 4069, - 'tcp_checksum': None, - 'tcp_urgent_ptr': 0, - 'tcp_src_port_mode': 'increment', # ( increment | decrement | random ) - 'tcp_src_port_step': 1, - 'tcp_src_port_count': 1, - 'tcp_dst_port_mode': 'increment', # ( increment | decrement | random ) - 'tcp_dst_port_step': 1, - 'tcp_dst_port_count': 1, - # L4, UDP - 'udp_src_port': 1024, - 'udp_dst_port': 80, - 'udp_length': None, - 'udp_dst_port_mode': 'increment', # ( increment | decrement | random ) - 'udp_src_port_step': 1, - 'udp_src_port_count': 1, - 'udp_src_port_mode': 'increment', # ( increment | decrement | random ) - 'udp_dst_port_step': 1, - 'udp_dst_port_count': 1, -} ----- - +include::build/hlt_args.asciidoc[] ==== FD.IO open source project using TRex diff --git a/ws_main.py b/ws_main.py index 90716b4a..8287316f 100644 --- a/ws_main.py +++ b/ws_main.py @@ -120,50 +120,92 @@ def do_visio(bld): for x in bld.path.ant_glob('visio\\*.vsd'): tg = bld(rule='${VIS} -i ${SRC} -o ${TGT} ', 
source=x, target=x.change_ext('.png')) -def build_cp_docs (task): - out_dir = task.outputs[0].abspath() - export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') +def get_trex_core_git(): trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") if not os.path.isdir(trex_core_git_path): trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. - trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( - bld= "html", - src= ".", - dst= out_dir) - ) - return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) - return (1) + return trex_core_git_path + +def parse_hlt_args(task): + trex_core_git_path = get_trex_core_git() + if not trex_core_git_path: + return 1 + hltapi_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'stl', 'trex_stl_lib', 'trex_stl_hltapi.py')) + header = ['[options="header",cols="<.^1,^.^1,9<.^e"]', '|=================', '^| Argument | Default ^| Comment'] + footer = ['|=================\n'] + hlt_asciidoc = [] + category_regexp = '^(\S+)_kwargs = {$' + comment_line_regexp = '^\s*#\s*(.+)$' + arg_line_regexp = "^\s*'([^']+)':\s*'?([^,']+)'?,\s*#?\s*(.+)?$" + if not os.path.exists(hltapi_path): + raise Exception('Could not find hltapi file: %s' % hltapi_path) + with open(hltapi_path) as f: + in_args = False + for line in f.read().splitlines(): + if not in_args: + if line.startswith('import'): + break + category_line = re.match(category_regexp, line) + if category_line: + hlt_asciidoc.append('\n===== %s\n' % category_line.group(1)) + hlt_asciidoc += header + in_args = True + continue + comment_line = re.match(comment_line_regexp, line) + if comment_line: + hlt_asciidoc.append('3+^.^s| %s' % comment_line.group(1).replace('|', '\|')) + 
continue + arg_line = re.match(arg_line_regexp, line) + if arg_line: + arg, default, comment = arg_line.groups() + hlt_asciidoc.append('| %s | %s | %s' % (arg, default, comment.replace('|', '\|') if comment else '')) + continue + if line == '}': + hlt_asciidoc += footer + in_args = False + if not len(hlt_asciidoc): + raise Exception('Parsing of hltapi args failed') + with open('build/hlt_args.asciidoc', 'w') as f: + f.write('\n'.join(hlt_asciidoc)) + return 0 + +def build_cp_docs (task): + out_dir = task.outputs[0].abspath() + export_path = os.path.join(os.getcwd(), 'build', 'cp_docs') + trex_core_git_path = get_trex_core_git() + if not trex_core_git_path: # there exists a default directory or the desired ENV variable. + return 1 + trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) + ) + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) def build_stl_cp_docs (task): out_dir = task.outputs[0].abspath() export_path = os.path.join(os.getcwd(), 'build', 'cp_stl_docs') - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. - trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( - bld= "html", - src= ".", - dst= out_dir) - ) - return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) - return (1) + trex_core_git_path = get_trex_core_git() + if not trex_core_git_path: # there exists a default directory or the desired ENV variable. 
+ return 1 + trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) + build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + bld= "html", + src= ".", + dst= out_dir) + ) + return subprocess.call(build_doc_cmd, cwd = trex_core_docs_path) def build_cp(bld,dir,root,callback): export_path = os.path.join(os.getcwd(), 'build', dir) - trex_core_git_path = os.path.join(os.getcwd(), os.pardir, "trex-core") - if not os.path.isdir(trex_core_git_path): - trex_core_git_path = os.getenv('TREX_CORE_GIT', None) - if trex_core_git_path: # there exists a default directory or the desired ENV variable. - trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', root, 'index.rst') - bld(rule=callback,target = dir) - else: + trex_core_git_path = get_trex_core_git() + if not trex_core_git_path: # there exists a default directory or the desired ENV variable. 
raise NameError("Environment variable 'TREX_CORE_GIT' is not defined.") + trex_core_docs_path = os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', root, 'index.rst') + bld(rule=callback,target = dir) @@ -191,8 +233,15 @@ def build(bld): bld(rule=my_copy, target='my_chart.js') + build_cp(bld,'hlt_args.asciidoc','stl/trex_stl_lib', parse_hlt_args) + bld.add_group() # separator, the documents may require any of the pictures from above + if os.path.exists('build/hlt_args.asciidoc'): + bld.add_manual_dependency( + bld.path.find_node('draft_trex_stateless.asciidoc'), + 'build/hlt_args.asciidoc') + bld(rule='${ASCIIDOC} -b deckjs -o ${TGT} ${SRC[0].abspath()}', source='trex_config.asciidoc ', target='trex_config_guide.html', scan=ascii_doc_scan) -- cgit 1.2.3-korg From 030437a6d99236d9709b3a61bd92e92fb82aa16b Mon Sep 17 00:00:00 2001 From: imarom Date: Thu, 17 Mar 2016 02:50:29 +0200 Subject: enhanched tunable section --- draft_trex_stateless.asciidoc | 171 ++++++++++++++++++++++++++++++++---------- 1 file changed, 133 insertions(+), 38 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 6418f55f..1369f3e3 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -224,7 +224,7 @@ class STLS1(object): mode = STLTXCont()) <2> - def get_streams (self, direction = 0): <3> + def get_streams (self, direction = 0, **kwargs): <3> # create 1 stream return [ self.create_stream() ] @@ -433,7 +433,7 @@ To solve this there is a way to use direction flag in the script ), mode = STLTXCont()) - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): # create 1 stream if direction==0: <1> src_ip="16.0.0.1" @@ -917,7 +917,7 @@ class STLS1(object): mode = STLTXCont()) <2> - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): # create 1 stream return [ self.create_stream() ] @@ -1422,7 +1422,7 @@ This profile has 3 streams, 
each with different size packet. The rate is differe mode = STLTXCont(pps = pps)) - def get_streams (self, direction = 0): <1> + def get_streams (self, direction = 0, **kwargs): <1> if direction == 0: <2> src = self.ip_range['src'] @@ -2106,7 +2106,7 @@ There is an assumption that this pcap has one packet. In case it has more only t [source,python] ---- - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), # path relative to pwd <1> mode = STLTXCont(pps=10)) ] @@ -2121,7 +2121,7 @@ There is an assumption that this pcap has one packet. In case it has more only t [source,python] ---- - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): return [STLStream(packet = STLPktBuilder(pkt ="yaml/udp_64B_no_crc.pcap", path_relative_to_profile = True), <1> mode = STLTXCont(pps=10)) ] @@ -2492,15 +2492,65 @@ value 0x01 |================= -==== Tutorial: Advance traffic profile - platform [TODO] +==== Tutorial: Advance traffic profile + +As said above, every traffic profile must define the following function: + +[source,python] +---- +def get_streams (self, direction = 0, **kwargs) +---- + +'direction' is a mandatory field that will always be provided for any profile +being loaded. + +Besides that, a profile can be provided with any key-value pairs which can be +used to customize this profile - we call these 'tunables'. + +It is up to the profile to define which tunables it can accept and customize +the output based on them. + +[NOTE] +===================================================================== +All paramteres must be provided with default values. A profile must be loadable with no paramters. +**kwargs contains all the automatically provided values which are not +tunables. +Every tuanble must be expressed as key-value pair with default value. 
+===================================================================== + + +For example, +let's take a look at a profile called 'pcap_with_vm.py' + +*file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] + +[source,python] +---- +def get_streams (self, + direction = 0, + ipg_usec = 10.0, + loop_count = 5, + ip_src_range = None, + ip_dst_range = {'start' : '10.0.0.1', 'end': '10.0.0.254'}, + **kwargs) +---- + +This profile gets 'direction' as a tunable and mandatory field. +Define 4 more tunables which the profile decided about, +And automatic values such as 'port_id' which are not tunables will be provided on kwargs. *Direction*:: +Direction is a tunable that will always be provided by the API/console when loading +a profile, but it can be overriden by the user. +It is used to make the traffic profile more usable such as bi-directional profile. +However, a profile is free to ignore this parameter. -To make the traffic profile more usable, the traffic profile support per direction/interface. +As default 'direction' will be equal to port_id % 2, so the *even* ports will be +provided with ''0'' and the *odd* ones with ''1''. [source,python] ---- -def create_streams (self, direction = 0,**args): +def get_streams (self, direction = 0,**kwargs): if direction = 0: rate =100 <1> else: @@ -2531,9 +2581,13 @@ interfaces 1/3 is direction 1 So rate will be changed accordingly. -*Per Interface*:: - -In this case there is a different profile base on interface ID +*Customzing Profiles Using ''port_id''*:: + +**kwargs provide default values that are passed along to the profile. +such a value is 'port_id' - which is the port ID for the profile. + +Using that you can define one can define a complex profile based +on different ID of ports. 
[source,python] ---- @@ -2541,8 +2595,6 @@ In this case there is a different profile base on interface ID def create_streams (self, direction = 0, **args): port_id = args.get('port_id') - if port_id==None: - port_id=0 if port_id == 0: return [STLHltStream(tcp_src_port_mode = 'decrement', @@ -2590,41 +2642,84 @@ def create_streams (self, direction = 0, **args): .. ---- -The Console will give the port/direction and will get the right stream in each interface - +*Full example using the TRex Console*:: -*Tunable*:: - -[source,python] +Let's take the previous pcap_with_vm.py and examine it with the console: + +[source,bash] ---- - -class STLS1(object): +-=TRex Console v1.1=- - def __init__ (self): - self.num_clients =30000 # max is 16bit <1> - self.fsize =64 +Type 'help' or '?' for supported actions - def create_stream (self): +trex>profile -f stl/pcap_with_vm.py - # create a base packet and pad it to size - size = self.fsize - 4 # no FCS - base_pkt = Ether(src="00:00:dd:dd:00:01")/IP(src="55.55.1.1",dst="58.0.0.1")/UDP(dport=12,sport=1025) - pad = max(0, size - len(base_pkt)) * 'x' - +Profile Information: + + +General Information: +Filename: stl/pcap_with_vm.py +Stream count: 5 + +Specific Information: +Type: Python Module +Tunables: ['direction = 0', 'ip_src_range = None', 'loop_count = 5', 'ipg_usec = 10.0', + "ip_dst_range = {'start': '10.0.0.1', 'end': '10.0.0.254'}"] + +trex> ---- -<1> Define object args - +So we can provide tunables on all those fields. 
+Let's change some: + [source,bash] ---- -$start -f ex1.py -t "fsize=1500,num_clients=10000" #<1> +trex>start -f stl/pcap_with_vm.py -t ipg_usec=15.0,loop_count=25 + +Removing all streams from port(s) [0, 1, 2, 3]: [SUCCESS] + + +Attaching 5 streams to port(s) [0]: [SUCCESS] + + +Attaching 5 streams to port(s) [1]: [SUCCESS] + + +Attaching 5 streams to port(s) [2]: [SUCCESS] + + +Attaching 5 streams to port(s) [3]: [SUCCESS] + + +Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] + +61.10 [ms] + +trex> ---- -<1> Change the Tunable using -t option -Once a profile was defined, it is possible to give a tunable from Console and change the default value. -In this example, change the fsize to 1500 bytes +[source,bash] +---- +We can also customize these to different ports: + +trex>start -f stl/pcap_with_vm.py --port 0 1 -t ipg_usec=15.0,loop_count=25#ipg_usec=100,loop_count=300 + +Removing all streams from port(s) [0, 1]: [SUCCESS] +Attaching 5 streams to port(s) [0]: [SUCCESS] + + +Attaching 5 streams to port(s) [1]: [SUCCESS] + + +Starting traffic on port(s) [0, 1]: [SUCCESS] + +51.00 [ms] + +trex> +---- + ==== Tutorial: Per stream statistics * Per stream statistics is implemented using hardware assist when possible (X710/XL710 Intel NICs flow director rules for example). 
@@ -2648,7 +2743,7 @@ In this example, change the fsize to 1500 bytes class STLS1(object): - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): return [STLStream(packet = STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap"), @@ -2710,7 +2805,7 @@ class STLS1(object): ) ] - def get_streams (self, direction = 0): + def get_streams (self, direction = 0, **kwargs): return self.create_streams() ---- -- cgit 1.2.3-korg From 8fd4d27c1f2d3df1a259395e93fdd6b176178bf6 Mon Sep 17 00:00:00 2001 From: imarom Date: Thu, 17 Mar 2016 06:18:10 +0200 Subject: RX stats doc --- draft_trex_stateless.asciidoc | 146 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 139 insertions(+), 7 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 1369f3e3..3760b944 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -2738,22 +2738,154 @@ trex> ** IPv4 with one vlan tag * Number of concurrent streams you can get statistics for is 128. +We'll demonstrate this with two examples, one that uses the console and one that uses the Python API. + +*Console*:: + +In order to use the console, we'll take a simple profile which defines +two streams and configure them with two different PG IDs. 
+ +*file*:: link:{github_stl_path}/flow_stats.py[stl/flow_stats.py] + [source,python] ---- class STLS1(object): - def get_streams (self, direction = 0, **kwargs): - return [STLStream(packet = - STLPktBuilder( - pkt ="stl/yaml/udp_64B_no_crc.pcap"), - mode = STLTXCont(pps=10), - rx_stats = STLRxStats(pg_id = 7)) <1> + def get_streams (self, direction = 0): + return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), + mode = STLTXCont(pps = 1000), + flow_stats = STLFlowStats(pg_id = 7)), <1> + + STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_594B_no_crc.pcap"), + mode = STLTXCont(pps = 5000), + flow_stats = STLFlowStats(pg_id = 12)) <2> ] + +---- +<1> assigned to PG ID 7 +<2> assigned to PG ID 12 + +Now we will inject this to the console and use the TUI to see what's going on: + +[source,python] +---- +trex>start -f stl/flow_stats.py --port 0 + +Removing all streams from port(s) [0]: [SUCCESS] + + +Attaching 2 streams to port(s) [0]: [SUCCESS] + + +Starting traffic on port(s) [0]: [SUCCESS] + +155.81 [ms] + +trex>tui + +Streams Statistics + + PG ID | 12 | 7 + -------------------------------------------------- + Tx pps | 5.00 Kpps | 999.29 pps <1> + Tx bps L2 | 23.60 Mbps | 479.66 Kbps + Tx bps L1 | 24.40 Mbps | 639.55 Kbps + --- | | + Rx pps | 5.00 Kpps | 999.29 pps <2> + Rx bps | N/A | N/A <3> + ---- | | + opackets | 222496 | 44500 + ipackets | 222496 | 44500 + obytes | 131272640 | 2670000 + ibytes | N/A | N/A <3> + ----- | | + tx_pkts | 222.50 Kpkts | 44.50 Kpkts + rx_pkts | 222.50 Kpkts | 44.50 Kpkts + tx_bytes | 131.27 MB | 2.67 MB + rx_bytes | N/A | N/A <3> + +---- +<1> TX bandwidth of the streams matches the configured values +<2> RX bandwidth means that no drops were seen +<3> RX BPS is not supported on this platform (no hardware support for BPS) hence the N/A. 
+ + +*Flow Stats Using The Python API*:: + +We'll use the following example: + +[source,python] +---- +def rx_example (tx_port, rx_port, burst_size): + + # create client + c = STLClient() + + try: + pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/ + UDP(dport=12,sport=1025)/IP()/'a_payload_example') + + s1 = STLStream(name = 'rx', + packet = pkt, + flow_stats = STLFlowStats(pg_id = 5), <1> + mode = STLTXSingleBurst(total_pkts = 5000, + percentage = 80 + )) + + # connect to server + c.connect() + + # prepare our ports - TX/RX + c.reset(ports = [tx_port, rx_port]) + + # add the stream to the TX port + c.add_streams([s1], ports = [tx_port]) + + # start and wait for completion + c.start(ports = [tx_port]) + c.wait_on_traffic(ports = [tx_port]) + + # fetch stats for PG ID 5 + flow_stats = c.get_stats()['flow_stats'].get(5) <2> + + tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0) <2> + tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0) <2> + rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0) <2> + +---- +<1> define the stream to use PG ID 5 +<2> the structure of the object ''flow_stats'' is described below + +==== flow_stats object structure +A dictionary which keys are the configured PG IDs. + +The next level is a dictionary which contains 'tx_pkts', 'tx_bytes' and 'rx_pkts'. + +Each one of those keys contain a dictionary of per port values. 
+ + +Here is a printout of flow_stats object for 3 PG IDs after a specific run: + +[source,bash] +---- +{ + 5: {'rx_pkts' : {0: 0, 1: 0, 2: 500000, 3: 0, 'total': 500000}, + 'tx_bytes' : {0: 0, 1: 39500000, 2: 0, 3: 0, 'total': 39500000}, + 'tx_pkts' : {0: 0, 1: 500000, 2: 0, 3: 0, 'total': 500000}}, + + 7: {'rx_pkts' : {0: 0, 1: 0, 2: 0, 3: 288, 'total': 288}, + 'tx_bytes' : {0: 17280, 1: 0, 2: 0, 3: 0, 'total': 17280}, + 'tx_pkts' : {0: 288, 1: 0, 2: 0, 3: 0, 'total': 288}}, + + 12: {'rx_pkts' : {0: 0, 1: 0, 2: 0, 3: 1439, 'total': 1439}, + 'tx_bytes': {0: 849600, 1: 0, 2: 0, 3: 0, 'total': 849600}, + 'tx_pkts' : {0: 1440, 1: 0, 2: 0, 3: 0, 'total': 1440}} +} ---- -<1> Configure this stream to be counted on all RX ports as packet group id 7 +==== TODO * TUI should show Tx/Rx stats [TODO] * Python API to get the info [TODO] -- cgit 1.2.3-korg From eaf2cd52405987e1f4418467e1c9b3b4f999a0b2 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Mar 2016 13:53:30 +0200 Subject: fix note issue --- trex_book.asciidoc | 3 --- 1 file changed, 3 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index f5726493..5a0f41a0 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -126,10 +126,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] |================= -[NOTE] -============================================== For Intel XL710 NICS, Cisco QSFP+ won't work -============================================== [IMPORTANT] -- cgit 1.2.3-korg From a66a4987d66438af1855d765344bee1ba274df46 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Mar 2016 16:58:51 +0200 Subject: minor --- trex_book.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 5a0f41a0..12e85298 100755 --- 
a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -126,7 +126,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] |================= - For Intel XL710 NICS, Cisco QSFP+ won't work + For Intel XL710 NICS, Cisco SR4/LR QSFP+ won't work [IMPORTANT] @@ -290,7 +290,7 @@ image:images/loopback_right.png[title="rigt"] .Wrong loopback image:images/loopback_wrong.png[title="rigt"] -In case you have 1Gb/Sec Intel NIC (I350) you can do anything you like from the loopback perspective *but* you must filter the management port before see xref:trex_config[here]. +In case you have 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC you can do anything you like from the loopback perspective *but* you must filter the management port before see xref:trex_config[here]. ==== Identify the ports -- cgit 1.2.3-korg From 9afa7bb6753a9ab0c8987b9c848fb377a1811ebd Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Mar 2016 17:08:59 +0200 Subject: add visio --- release_notes.asciidoc | 7 ++++--- visio_drawings/stl_streams_example.vsd | Bin 0 -> 183296 bytes 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 visio_drawings/stl_streams_example.vsd diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 1a1888fa..16e2e189 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -26,9 +26,10 @@ endif::backend-docbook[] * Support pyATS/32bit/Python2.x for TCL * Per stream statistic - Fix High speed of start/stop of giving zero in statistics * Traffic profile direction/port directive works now see -* Add Python API documentation -* Update per stream statistic documentation - +* Add Python API documentation and pyATS2.0 support +* Update per stream statistic documentation see link:draft_trex_stateless.html#_tutorial_per_stream_statistics[per stream statistic] +* 
Update HLTAPI arguments link:draft_trex_stateless.html#_hlt_supported_arguments_a_id_altapi_support_a[HLTAPI] +* Fix E1000 DPDK driver prints with ESXi == Release 1.95 == diff --git a/visio_drawings/stl_streams_example.vsd b/visio_drawings/stl_streams_example.vsd new file mode 100644 index 00000000..b947c3e8 Binary files /dev/null and b/visio_drawings/stl_streams_example.vsd differ -- cgit 1.2.3-korg From 50d7af7dd6347c9a577ab20c854ba17d577e08ef Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 17 Mar 2016 17:15:38 +0200 Subject: v1.96 release notes --- release_notes.asciidoc | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 16e2e189..29f42649 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -21,16 +21,22 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 1.96 [not yet] == +== Release 1.96 == * Support pyATS/32bit/Python2.x for TCL +* Traffic profile direction/port directive works see link:draft_trex_stateless.html#_tutorial_advance_traffic_profile[here] +* Documentation +** Add Python API documentation link:cp_stl_docs/index.html[here] +** Add pyATS2.0 support link:cp_stl_docs/index.html[here] +** Update per stream statistic documentation see link:draft_trex_stateless.html#_tutorial_per_stream_statistics[per stream statistic] +** Update HLTAPI arguments link:draft_trex_stateless.html#_hlt_supported_arguments_a_id_altapi_support_a[HLTAPI] + +=== fix issues: === + * Per stream statistic - Fix High speed of start/stop of giving zero in statistics -* Traffic profile direction/port directive works now see -* Add Python API documentation and pyATS2.0 support -* Update per stream statistic documentation see link:draft_trex_stateless.html#_tutorial_per_stream_statistics[per stream statistic] -* Update HLTAPI arguments link:draft_trex_stateless.html#_hlt_supported_arguments_a_id_altapi_support_a[HLTAPI] * Fix E1000 DPDK driver prints with ESXi + == 
Release 1.95 == * TUI support per stream stats (press s to get to this window) -- cgit 1.2.3-korg From 05d881e15f08a5a0a658986342d8dfd22ab49413 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 21 Mar 2016 11:52:40 +0200 Subject: added Fedora 21 link + brief example of installation --- trex_book.asciidoc | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 12e85298..fd3f9923 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -205,10 +205,15 @@ The ISO images of the described Linux OS can be downloaded from the following li | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] +| link:http://fedora-mirror01.rbc.ru/pub/fedora/linux/releases/21/Server/x86_64/iso/Fedora-Server-DVD-x86_64-21.iso[Fedora 21] + | link:http://fedora-mirror01.rbc.ru/pub/fedora/linux/releases/21/Server/x86_64/iso/Fedora-Server-21-x86_64-CHECKSUM[Fedora 21 CHECKSUM] | link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.1] | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] |====================================== +For Fedora, you can get link close to your location at: + +https://admin.fedoraproject.org/mirrormanager/mirrors/Fedora + +Choose: "Fedora Linux http" -> releases -> -> Server -> x86_64 -> iso -> Fedora-Server-DVD-x86_64-.iso Then, verify the checksum of the downloaded file matches the linked checksum values with the `sha256sum` command. 
For example: @@ -224,6 +229,8 @@ $sha256sum Fedora-18-x86_64-DVD.iso Ask your lab admin to install the Linux using CIMC, assign an IP, and set the DNS. Request the sudo or super user password to enable you to ping and SSH. +xref:fedora21_example[Example of installing Fedora 21 Server] + IMPORTANT: To use TRex, you should have sudo on this machine or root password. WARNING: Upgrading the linux Kernel using `yum upgrade` require to build the TRex drivers. @@ -1659,6 +1666,33 @@ The TRex output ---- <1> this counter should be zero +anchor:fedora21_example[] + +=== Fedora 21 *Server* installation + +Download the .iso file from link above, boot with it using Hypervisor or CIMC console. + +Troubleshooting -> install in basic graphics mode + +* In packages selection, choose: + +** C Development Tools and Libraries + +** Development Tools + +** System Tools + +* Set Ethernet configuration if needed + +* Use default hard-drive partitions, reclaim space if needed + +* After installation, edit file /etc/selinux/config + +set: + +SELINUX=disabled + +* Run: + +systemctl disable firewalld + +* Reboot === Troubleshoot common problems, FAQ -- cgit 1.2.3-korg From ff255c59ef82d663e8cc715165fc62c4a343fbb1 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 21 Mar 2016 15:31:49 +0200 Subject: disable updates in Fedora 21 installation example --- trex_book.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index fd3f9923..c84e77d1 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1692,6 +1692,10 @@ SELINUX=disabled * Run: + systemctl disable firewalld +* Edit file /etc/yum.repos.d/fedora-updates.repo + +set everywhere: + +enabled=0 + * Reboot === Troubleshoot common problems, FAQ -- cgit 1.2.3-korg From a9624734fdb2b76b00298102ee233993bccf7049 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 21 Mar 2016 16:07:36 +0200 Subject: move answer to next row --- trex_book.asciidoc | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index c84e77d1..59af4a9a 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1734,7 +1734,7 @@ We are planning to add MACs to `./dpdk_setup_ports.py -s` ===================================== -Q: TRex traffic doesn't show up on Wireshark, So I can't capture the traffic from the TRex port +Q: TRex traffic doesn't show up on Wireshark, So I can't capture the traffic from the TRex port + A: TRex uses DPDK that take ownership on the ports. We are using a Switch with port mirroring to capture the traffic -- cgit 1.2.3-korg From 9a9179a0e152cfc6e26efc8b86f7f08c4df59cd9 Mon Sep 17 00:00:00 2001 From: itraviv Date: Thu, 24 Mar 2016 19:05:18 +0200 Subject: add google analytics functionality to asciidoc --- draft_trex_stateless.asciidoc | 12 ++++++++++++ draft_trex_stateless_moved1.asciidoc | 11 +++++++++++ packet_builder_yaml.asciidoc | 13 +++++++++++++ release_notes.asciidoc | 13 +++++++++++++ trex_book.asciidoc | 13 ++++++++++++- trex_config.asciidoc | 12 ++++++++++++ trex_console.asciidoc | 15 +++++++++++++++ trex_control_plane_design_phase1.asciidoc | 12 ++++++++++++ trex_control_plane_peek.asciidoc | 12 ++++++++++++ trex_preso.asciidoc | 12 ++++++++++++ trex_rpc_server_spec.asciidoc | 12 ++++++++++++ trex_vm_manual.asciidoc | 12 ++++++++++++ vm_doc.asciidoc | 12 ++++++++++++ 13 files changed, 160 insertions(+), 1 deletion(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 3760b944..8d958465 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -21,6 +21,18 @@ ifdef::backend-xhtml11[] :p_width_1: 400 endif::backend-xhtml11[] +++++ + +++++ == Stateless support (Alpha stage) diff --git a/draft_trex_stateless_moved1.asciidoc b/draft_trex_stateless_moved1.asciidoc index b23e408a..c0fad4be 100644 --- a/draft_trex_stateless_moved1.asciidoc +++ b/draft_trex_stateless_moved1.asciidoc @@ -24,5 +24,16 @@ endif::backend-xhtml11[] 
moved to link:draft_trex_stateless.html[here] +++++ + +++++ diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index d52ee4c8..5d58f986 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -8,6 +8,19 @@ Packet Builder Language == change log +++++ + +++++ + [options="header",cols="^1,^h,a"] |================= | Version | name | meaning diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 29f42649..fa424a94 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -10,6 +10,19 @@ ifndef::backend-docbook[]









++++++++++++++ +++++ + +++++ + == TRex release notes == endif::backend-docbook[] diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 59af4a9a..256fe5be 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -9,7 +9,18 @@ TRex :local_web_server_url: csi-wiki-01:8181/trex :toclevels: 4 - +++++ + +++++ == Introduction diff --git a/trex_config.asciidoc b/trex_config.asciidoc index 88848c15..148637ec 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -9,6 +9,18 @@ TRex first time configuration :deckjs_transition: horizontal-slide :scrollable: +++++ + +++++ ++++++++++++++++++ +++++ + + + == Console === Overview diff --git a/trex_control_plane_design_phase1.asciidoc b/trex_control_plane_design_phase1.asciidoc index 663a51cc..c90de74f 100755 --- a/trex_control_plane_design_phase1.asciidoc +++ b/trex_control_plane_design_phase1.asciidoc @@ -6,6 +6,18 @@ TRex Control Plane Design - Phase 1 :quotes.++: :numbered: +++++ + +++++ == Introduction diff --git a/trex_control_plane_peek.asciidoc b/trex_control_plane_peek.asciidoc index 6d3aa134..fecc7938 100755 --- a/trex_control_plane_peek.asciidoc +++ b/trex_control_plane_peek.asciidoc @@ -7,6 +7,18 @@ TRex Control Plane Design - Phase 1 peek :numbered: +++++ + +++++ === TRex traffic generator diff --git a/trex_preso.asciidoc b/trex_preso.asciidoc index 1e66f69c..8d5ce899 100755 --- a/trex_preso.asciidoc +++ b/trex_preso.asciidoc @@ -11,6 +11,18 @@ TRex realistic traffic generator :web_server_url: http://trex-tgn.cisco.com/trex +++++ + +++++ == What problem is being solved? 
diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index a073c6c3..81d41628 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -9,6 +9,18 @@ The TRex RPC Server :local_web_server_url: csi-wiki-01:8181/trex :toclevels: 4 +++++ + +++++ == Change log diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index 1ae13e81..3cb07470 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -6,6 +6,18 @@ TRex Virtual Machine setup and basic usage :quotes.++: :numbered: +++++ + +++++ == Introduction diff --git a/vm_doc.asciidoc b/vm_doc.asciidoc index a61afc3c..3db0a752 100644 --- a/vm_doc.asciidoc +++ b/vm_doc.asciidoc @@ -1,3 +1,15 @@ +++++ + +++++ == VM instructions -- cgit 1.2.3-korg From 02f2d91854d7d26be4fd9846c8d911fecb63426b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 27 Mar 2016 14:51:11 +0300 Subject: fix GA scripts --- draft_trex_stateless.asciidoc | 13 +- draft_trex_stateless_moved1.asciidoc | 14 +- packet_builder_yaml.asciidoc | 13 +- release_notes.asciidoc | 24 ++-- trex_book.asciidoc | 14 +- trex_config.asciidoc | 12 +- trex_console.asciidoc | 15 +- trex_control_plane_design_phase1.asciidoc | 15 +- trex_control_plane_peek.asciidoc | 14 +- trex_preso.asciidoc | 12 +- trex_rpc_server_spec.asciidoc | 14 +- trex_vm_manual.asciidoc | 15 +- vm_doc.asciidoc | 230 +----------------------------- 13 files changed, 34 insertions(+), 371 deletions(-) diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc index 8d958465..7fed3d96 100644 --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -21,18 +21,7 @@ ifdef::backend-xhtml11[] :p_width_1: 400 endif::backend-xhtml11[] -++++ - -++++ +include::trex_ga.asciidoc[] == Stateless support (Alpha stage) diff --git a/draft_trex_stateless_moved1.asciidoc b/draft_trex_stateless_moved1.asciidoc index c0fad4be..761d2b18 100644 --- a/draft_trex_stateless_moved1.asciidoc +++ b/draft_trex_stateless_moved1.asciidoc 
@@ -21,19 +21,9 @@ ifdef::backend-xhtml11[] :p_width_1: 400 endif::backend-xhtml11[] +include::trex_ga.asciidoc[] -moved to link:draft_trex_stateless.html[here] - -++++ - -++++ diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc index 5d58f986..ed80358e 100644 --- a/packet_builder_yaml.asciidoc +++ b/packet_builder_yaml.asciidoc @@ -6,20 +6,11 @@ Packet Builder Language :quotes.++: :numbered: -== change log -++++ - -++++ [options="header",cols="^1,^h,a"] |================= diff --git a/release_notes.asciidoc b/release_notes.asciidoc index fa424a94..4eb0a3c6 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -10,18 +10,8 @@ ifndef::backend-docbook[]









++++++++++++++ -++++ - -++++ == TRex release notes == endif::backend-docbook[] @@ -34,6 +24,18 @@ ifdef::backend-docbook[] endif::backend-docbook[] + +== Release 1.98 == + +* Minor Console issue +* [red]*Image is broken* see link:http://trex-tgn.cisco.com/youtrack/issue/trex-193[trex-193] + +== Release 1.97 == + +* Support pyATS with Python 3/32bit +* Per stream statistic supported by software for I350/82559/VXNET3 +* [red]*Image is broken* see link:http://trex-tgn.cisco.com/youtrack/issue/trex-193[trex-193] + == Release 1.96 == * Support pyATS/32bit/Python2.x for TCL diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 256fe5be..6b0db870 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -9,18 +9,8 @@ TRex :local_web_server_url: csi-wiki-01:8181/trex :toclevels: 4 -++++ - -++++ +include::trex_ga.asciidoc[] + == Introduction diff --git a/trex_config.asciidoc b/trex_config.asciidoc index 148637ec..c08cac4d 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -9,18 +9,8 @@ TRex first time configuration :deckjs_transition: horizontal-slide :scrollable: -++++ - -++++ ++++++++++++++++++ -++++ - - +include::trex_ga.asciidoc[] == Console diff --git a/trex_control_plane_design_phase1.asciidoc b/trex_control_plane_design_phase1.asciidoc index c90de74f..b9454b09 100755 --- a/trex_control_plane_design_phase1.asciidoc +++ b/trex_control_plane_design_phase1.asciidoc @@ -6,18 +6,9 @@ TRex Control Plane Design - Phase 1 :quotes.++: :numbered: -++++ - -++++ + +include::trex_ga.asciidoc[] + == Introduction diff --git a/trex_control_plane_peek.asciidoc b/trex_control_plane_peek.asciidoc index fecc7938..3392245f 100755 --- a/trex_control_plane_peek.asciidoc +++ b/trex_control_plane_peek.asciidoc @@ -7,18 +7,8 @@ TRex Control Plane Design - Phase 1 peek :numbered: -++++ - -++++ +include::trex_ga.asciidoc[] + === TRex traffic generator diff --git a/trex_preso.asciidoc b/trex_preso.asciidoc index 8d5ce899..b0773fd6 100755 --- a/trex_preso.asciidoc +++ 
b/trex_preso.asciidoc @@ -11,18 +11,8 @@ TRex realistic traffic generator :web_server_url: http://trex-tgn.cisco.com/trex -++++ - -++++ == What problem is being solved? diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 81d41628..c48df052 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -9,18 +9,8 @@ The TRex RPC Server :local_web_server_url: csi-wiki-01:8181/trex :toclevels: 4 -++++ - -++++ +include::trex_ga.asciidoc[] + == Change log diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index 3cb07470..7e22d54a 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -6,18 +6,9 @@ TRex Virtual Machine setup and basic usage :quotes.++: :numbered: -++++ - -++++ + +include::trex_ga.asciidoc[] + == Introduction diff --git a/vm_doc.asciidoc b/vm_doc.asciidoc index 3db0a752..05017a9c 100644 --- a/vm_doc.asciidoc +++ b/vm_doc.asciidoc @@ -1,232 +1,4 @@ -++++ - -++++ - -== VM instructions - -=== instructions_type - -these are the instructions type - - -==== fix_checksum_ipv4 - -This command will fix ipv4 checksum header - -[source,python] ----- -{ -ins_name : string ,"fix_checksum_ipv4" ## this command will recalculate the ipv4 checksum -pkt_offset : uint16_t, number, ## the offset into the packet when the ipv4 header is located -} ----- - - -==== flow_man_simple - -This command will allocate and manipulate flow object data -For example, allocate a flow variable and object from 10.0.0.1-10.0.0.10 - -[source,python] ----- -{ -ins_name: string, "flow_man_simple" ## increment a flow variable -flow_varible_name: string "name_of_varible" # internal software will allocate the object for this, the name should be unique -object_size : uint16_t #size of the variable 1,2,4,8 ( max uint64) -Operation : "inc","dec","random" # the command could be inc from min-max start at init - # decrement - # random -split_by_core : true/false ##do we want to split the range by cores -init_value : number, size of 
object_size (max uint64) -min_value : number, size of object_size (max uint64) -max_value : number, size of object_size (max uint64) -} ----- - -==== write_to_pkt - -This command will copy flow varible into packet offset - -[source,c] ----- -tmp_pkt_data = (flow_var +add_value) - -if (big_edian){ - (varible_size )pkt[pkt_offset] =swap(tmp_pkt_data); -} ----- - - -[source,python] ----- -{ -ins_name : string , "write_to_pkt" ## -flow_varible_name : string "name_of_varible" # flow varible value to copy from -pkt_offset : uint16_t # the offset into the packet to copy the varible -add_value : 0 (size_of_the_varible)# when writing add this value -big_edian : bool default true # swap varible when copy yo packet -} ----- - - - -=== Examples - -=== Examples1 - -an examples to a programs that change src_ip in specific range for one core -range of src_ip 10.0.0.1-10.0.0.10 start from 10.0.0.7 -update ipv4 checksum -ip offset is in 14 - - offset -[ 6 - dest mac 0 - 6 - src mac 6 - 2 network 12 - - ip[0] 14 - ip[4] 18 - ip[8]-TTL,Protocol 22 - ip[12]-src_ip 26 - ip[12]-dest_ip 30 - -} - -The program - -[source,python] ----- -[ - -{ -ins_name : "flow_data_inc" -flow_varible_name : "src_ip" -object_size : 1 -operaqtion : "inc" -split_by_core : false # one core -init_value : 7 -min_value : 1 -max_value : 10 -} , - -{ -ins_name : "write_to_pkt" -flow_varible_name : "src_ip" -pkt_offset : 26, -add_value : 0 , -big_edian : true -}, - -{ -ins_name : "fix_checksum_ipv4" -pkt_offset : 14 -} - -] ----- - -=== Examples2 - -an examples to a programs that change src_ip and dest_ip in specific range for one core -range of src_ip 10.0.0.1-10.0.0.10 start from 10.0.0.7 -range of dest_ip 48.0.0.1-48.0.0.10 start from 48.0.0.7 - -update ipv4 checksum -ip offset is in 14 - - offset -[ 6 - dest mac 0 - 6 - src mac 6 - 2 network 12 - - ip[0] 14 - ip[4] 18 - ip[8]-TTL,Protocol 22 - ip[12]-src_ip 26 - ip[12]-dest_ip 30 - -} - -The program - -[source,python] ----- - -[ - -{ -ins_name : "flow_data_inc" 
-flow_varible_name : "src_ip" -object_size : 1 -operaqtion : "inc" -split_by_core : false # one core -init_value : 7 -min_value : 1 -max_value : 10 -} , - -{ -ins_name : "write_to_pkt" -flow_varible_name : "src_ip" -pkt_offset : 26, -add_value : 0 , -big_edian : true -}, - -{ -ins_name : "write_to_pkt" -flow_varible_name : "src_ip" -pkt_offset : 30, -add_value : 0 , -big_edian : true -}, - - -{ -ins_name : "fix_checksum_ipv4" -pkt_offset : 14 -} -] - ----- - - -=== Considerations - - -==== Control-Plain check - -- Verify that packet offset into fix_checksum_ipv4 is less that pkt_size - min_ip_header -- There is no stream that are orphaned (not started at startup and nobody call them) - -==== Data-Plain check - -- Convert the commands to a VM compress command -- Allocate flow memory per flow for each stream (currently add the memory in each offset) -- VM runner at startup/ each packet - - - - - - - - - - - - - - - - -- cgit 1.2.3-korg From cd954766ae0d916792f5c13304fdfb6deff24b52 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Mon, 28 Mar 2016 17:29:01 +0300 Subject: David edit 1 --- draft_trex_stateless.asciidoc | 1189 ++++++++++++++++++++++------------------- 1 file changed, 634 insertions(+), 555 deletions(-) mode change 100644 => 100755 draft_trex_stateless.asciidoc diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc old mode 100644 new mode 100755 index 3760b944..150f59af --- a/draft_trex_stateless.asciidoc +++ b/draft_trex_stateless.asciidoc @@ -21,53 +21,58 @@ ifdef::backend-xhtml11[] :p_width_1: 400 endif::backend-xhtml11[] +== Audience + +This document assumes basic knowledge of TRex, and assumes that TRex is installed and configured. + +For information, see the link:trex_manual.html[manual], especially the material up to the link:trex_manual.html#_basic_usage[Basic Usage] section. 
+ == Stateless support (Alpha stage) === High level functionality - -* High scale - line rate 14MPPS per core, linear scale with number of cores -* Support 1/10/25/40/100 Gb/sec interfaces -* Interface can be configured with multi traffic profiles -* Traffic Profile can support multi streams. Scale to 10K streams in parallel -* Each Stream -** Packet template - ability to build any packet using Scapy (e.g. MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) -*** It is possible to build malformed packets +// maybe Feature overview + +* Large scale - Supports a line rate of 14 million packets per second (mpps) per core, scalable with the number of cores +* Support for 1, 10, 25, 40, and 100 Gb/sec interfaces +* Support for multiple traffic profiles per interface +* Profile can support multiple streams, scalable to 10K parallel streams +* Supported for each stream: +** Packet template - ability to build any packet (including malformed) using link:https://en.wikipedia.org/wiki/Scapy[Scapy] (example: MPLS/IPv4/Ipv6/GRE/VXLAN/NSH) ** Field engine program -*** Ability to change any field inside the packet, for example src_ip = 10.0.0.1-10.0.0.255 -*** Ability to change the packet size (e.g. Random packet size 64-9K) -** Mode - Continuous/Burst/Multi burst support -** Rate can be specified in: -*** Packet per second -(e.g. 14MPPS) -*** L1 bandwidth (e.g. 500Mb/sec) -*** L2 bandwidth (e.g. 500Mb/sec) -*** Interface link percentage,( e.g. 
10%) -** Support HLTAPI like profile definition -** Action- stream can trigger a stream -* Interactive support- Fast Console, GUI -* Statistic per interface -* Statistic per stream done in hardware +*** Ability to change any field inside the packet (example: src_ip = 10.0.0.1-10.0.0.255) +*** Ability to change the packet size (example: random packet size 64-9K) +** Mode - Continuous/Burst/Multi-burst support +** Rate can be specified as: +*** Packets per second (example: 14MPPS) +*** L1 bandwidth (example: 500Mb/sec) +*** L2 bandwidth (example: 500Mb/sec) +*** Interface link percentage (example: 10%) +** Support for HLTAPI-like profile definition +** Action - stream can trigger a stream +* Interactive support - Fast Console, GUI +* Statistics per interface +* Statistics per stream done in hardware * Latency and Jitter per stream -* Blazing fast Automation support +* Blazingly fast automation support ** Python 2.7/3.0 Client API -** Python HLTAPI Client API -* Multi user support - multiple users can interact with the same TRex simultaneously +** Python HLTAPI Client API +* Multi-user support - multiple users can interact with the same TRex instance simultaneously // added "instance" -==== Prerequisite - -This document assumes that you know what is TRex and you already installed and configured it. To read more about it see here link:trex_manual.html[manual] - -You should read up to this link:trex_manual.html#_basic_usage[basic usage] ==== Traffic profile example +// Need explanation of example in figure. + image::images/stl_streams_example.png[title="Streams example",align="left",width={p_width}, link="images/stl_streams_example.png"] ==== High level functionality - near future +// "near future" and "roadmap" (below) are ~ same. Typically, Cisco does not document features before they're ready, but open source is a little different. We might want to find a better place to put the roadmap for the future - maybe a separate document. + * ARP emulation - learn server MAC. 
Support unlimited MAC addresses per port. -==== High level functionality - roadmap +==== High level functionality - Roadmap for future development * Add emulation support ** RIP/BGP/ISIS/SPF @@ -75,7 +80,7 @@ image::images/stl_streams_example.png[title="Streams example",align="left",width === IXIA IXExplorer vs TRex -TRex has limited functionality compared to IXIA, but has some advantages. The following table summarized the difference +TRex has limited functionality compared to IXIA, but has some advantages. The following table summarizes the differences: .TRex vs IXExplorer [cols="1^,3^,3^,5^", options="header"] @@ -83,92 +88,125 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo | Feature | IXExplorer |TRex | Description | Line rate | Yes |Almost ~14MPPS/core| | Multi stream | 255 | [green]*Unlimited* | -| Packet build flexibility | Limited | [green]*Scapy- Unlimited* | e.g GRE/VXLAN/NSH is supported. Can be extended to future protocols +| Packet build flexibility | Limited | [green]*Scapy - Unlimited* | Example: GRE/VXLAN/NSH is supported. Can be extended to future protocols | Packet Field engine | limited | [green]*Unlimited* | -| Tx Mode | Continuous/Burst/Multi burst | Continuous/Burst/Multi burst| +| Tx Mode | Continuous/Burst/Multi-burst | Continuous/Burst/Multi-burst| | ARP Emulation | Yes | Not yet - workaround | | Automation | TCL/Python wrapper to TCL | [green]*native Python/Scapy* | -| Automation speed sec| 30sec | [green]*1msec* | test of load/start/stop/get counters -| HLTAPI | Full support. 2000 pages of documentation | Limited 20 page of documentation| -| Per Stream statistic | 255 streams with 4 global mask | 128 rules for XL710/X710 hardware and software impl for 82599/I350/X550| in case of XL710/X710 there are some restrictions for the packet type +| Automation speed sec| 30 sec | [green]*1 msec* | test of load/start/stop/get counters +| HLTAPI | Full support. 2000 pages of documentation | Limited. 
20 pages of documentation| +| Per Stream statistics | 255 streams with 4 global masks | 128 rules for XL710/X710 hardware and software impl for 82599/I350/X550| Some packet type restrictions apply to XL710/X710. | Latency Jitter | Yes | Yes | -| Multi user support | Yes | Yes | -| GUI | very good | WIP, packet build is scapy based. Not the same as IXIA | -| Cisco pyATS support | Yes | Yes Python 2.7, Python 64bits, WIP to port it to Python 3.0| +| Multi-user support | Yes | Yes | +| GUI | very good | WIP, packet build is scapy-based. Not the same as IXIA. | +| Cisco pyATS support | Yes | Yes - Python 2.7, Python 64-bit, WIP to port it to Python 3.0| | Emulation | Yes | Not yet | -| Port Ids | Base on IXIA numebrs | Depends on PCI enumeration +| Port IDs | Based on IXIA numebrs | Depends on PCI enumeration |================= === RPC Architecture -To support interactive mode, a JSON-RPC2 thread is added to the TRex Control Plane core. +A JSON-RPC2 thread in the TRex control plane core provides support for interactive mode. + +// RPC = Remote Procedure Call, alternative to REST? --YES, no change + +image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] + +// Is there a big picture that would help to make the next 11 bullet points flow with clear logic? --explanation of the figure + +*Layers*:: +// TBD: rendering problem with bullet indentation + +// Maybe Layers, Interfaces, and Control of Interfaces should each be level 4 headings instead of complex bulleted lists. + +* Control transport protocol: ZMQ working in REQ/RES mode. +// change all ZMQ to "link:http://rfc.zeromq.org/spec:37[ZeroMQ] Message Transport Protocol (ZMTP)"? not sure what REQ/RES mode is +* RPC protocol on top of the control transport protocol: JSON-RPC2. +* Asynchronous transport: ZMQ working in SUB/PUB mode (used for asynchronous events such as interface change mode, counters, and so on). 
+ +*Interfaces*:: + +* Automation API: Python is the first client to implement the Python automation API. +* User interface: The console uses the Python API to implement a user interface for TRex. -The following diagram illustrates the RPC server/client components +*Control of TRex interfaces*:: -image::images/trex_2_stateless.png[title="RPC Server Position",align="left",width={p_width}, link="images/trex_2_stateless.png"] +* Numerous users can control a single TRex server together, from different interfaces. +* Users acquire individual TRex interfaces exclusively. *Example*: Two users control a 4-port TRex server. User A acquires interfaces 0 and 1; User B acquires interfaces 3 and 4. +* Only one user interface (console or GUI) can have read/write control of a specific interface. This enables caching the TRex server interface information in the client core. *Example*: User A, with two acquired interfaces, can have only one read/write control session at a time. -* The Control transport protocol is ZMQ working in REQ/RES mode -* JSON-RPC2 is the RPC protocol on top of the ZMQ REQ/RES -* Async transport is ZMQ working SUB/PUB mode. It is for async events such as interface change mode, counters etc. -* Python is the first Client to implement the Python automation API -* Console utilizes the Python API to implement a user interface to TRex -* Multiple users can control one TRex server in parallel as long as they control different Interfaces. Individuqal TRex Interfaces can be acquired by a user. For example, a TRex with four ports can be used by two users. User A can acquire Interfaces 0 & 1 and User B can acquire Interfaces 2 & 3. -* There can be only *one* control Console/GUI (R/W) entity for a specific user. User A with two interfaces can have only one R/W Control session active at a specific time. By that we can cache the TRex Server interface information in the Client. -* For one user there can be many read-only clients for getting statistics. 
-* Client should sync with the server to get the state at connection time and cache the server information locally once the state was changed -* In case of crash/exit of the Client it should sync again at connection time. -* The Client has the ability to get a statistic in real time (with ASYNC ZMQ). This provides the option to have multiple ways to look into the statistics (GUI and Console) at the same time. +// confirm "This enables caching the TRex..." above -image::images/trex_stateless_multi_user.png[title="Multi user-per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] +* A user can set up numerous read-only clients on a single interface - for example, for monitoring traffic statistics on the interface. +* A client in read-write mode can acquire a statistic in real time (with ASYNC ZMQ). This enables viewing statistics through numerous user interfaces (console and GUI) simultaneously. -For more detailed see RPC specification link:trex_rpc_server_spec.html[here] +*Synchronization*:: +* A client should sync with the TRex server to get the state in connection time, and cache the server information locally once the state was changed. // Not clear; avoid "should". +* If a client crashes or exits, it should sync again after reconnecting. +// Avoid "should". Meaning will be more clear without "should". + +image::images/trex_stateless_multi_user.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] + +For details about the TRex RPC server, see the link:trex_rpc_server_spec.html[RPC specification]. + +==== RPC architecture highlights This Architecture provides the following advantages: -* Fast interaction with TRex server. 
For example, very fast load/start/stop profiles to an interface (~2000 cycles/sec for load/start/stop profile) -* Leveraging Python/Scapy for building a packet/Field engine -* HLTAPI compiler complexity is done in Python +* Fast interaction with TRex server. Loading, starting, and stopping a profile for an interface is very fast - about 2000 cycles/sec. +* Leverages Python/Scapy for building a packet/field engine. +* HLTAPI compiler complexity is handled in Python. + +=== TRex Objects -=== TRex Entities +// maybe call it "Objects" in title and figure caption -image::images/stateless_objects.png[title="TRex Entities",align="left",width={p_width_1}, link="images/stateless_objects.png"] +image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_width_1}, link="images/stateless_objects.png"] -* *TRex*: Each TRex instance includes a number of interfaces -* *Interface*: For each Interface it is possible to add/remove a number of traffic profiles (TP) -* *Traffic profile*: Each traffic profile includes a number of streams. This is the basic building block of activation. It is possible to add/remove traffic profiles on an interface while other traffic profiles are active on the interface. A profile can be looked as a "program" with dependency between it's streams. It is not possible to change a profile while it is running except for changing the rates +* *TRex*: Each TRex instance supports numerous interfaces. +// "one or more"? +* *Interface*: Each interface supports one or more traffic profiles (TP). +* *Traffic profile*: Each traffic profile supports one or more streams. * *Stream*: Each stream includes: -** *Packet*: Packet template up to 9K bytes -** *Field Engine*: which field to change, do we want to change the packet size -** *Mode*: How to send the packet. 
Continuous/Burst/Multi Burst -** *Rx Stats*: Which Statstistic to collect for each stream -** *Rate*: Specified in Packet Per Second (pps) or bandwidth (bps) -** *Action*: The next stream to go after this stream is finished. Valid for Burst/Continuous mode +** *Packet*: Packet template up to 9 KB +// ok to standardize to KB? +** *Field Engine*: Which field to change, do we want to change packet size +// unclear +** *Mode*: Specifies how to send packets: Continuous/Burst/Multi-burst +** *Rx Stats*: Statistics to collect for each stream +** *Rate*: Rate (packets per second or bandwidth) +** *Action*: Specifies stream to follow when the current stream is complete. (valid for Continuous or Burst modes) === Stateful vs Stateless -TRex Stateless support is basic L2/L3 tests more for Switch/Router. -With Stateless it is possible to define a Stream that has a *one* packet template, define a program to change any fields in the packet and run it in continues/burst/multi-burst mode. -With Statless you *can't* learn NAT translation because there is no context of flow/client/server. In Stateful the basic building block is a flow/application (That compose from many packets). -However, Using Stateless mode, it is much more flexible as you can define any type of packets and build simple program and in a way you can mimic Stateful but not everything. -For example, you can load a pcap with the number of packets as a link of streams -a->b->c->d-> back to a -And create a program for each stream to change src_ip=10. 0.0.1-10.0.0.254 this will create something similar to Stateful but the underline is totally different. -If you are confused you probably need Stateless. +TRex Stateless support enables basic L2/L3 testing, relevant mostly for a switch or router. Using Statelss mode, it is possible to define a stream with a *one* packet template, define a program to change any fields in the packet, and run the stream in continuous, burst, or multi-burst mode. 
+With Stateless, you *cannot* learn NAT translation; there is no context of flow/client/server. -.Stateful vs Stateless +* In Stateful mode, the basic building block is a flow/application (composed from many packets). +* Stateless mode is much more flexible, enabling you to define any type of packet, and build a simple program. + +.Stateful vs Stateless features [cols="1^,3^,3^", options="header"] |================= -| Feature | Stateless |Statful +| Feature | Stateless |Stateful | Flow base | No | Yes | NAT | No | Yes -| Tunnel | Yes | Only specific +| Tunnel | Yes | Some are supported | L7 App emulation | No | Yes | Any type of packet | Yes | No | Latency Jitter | Per Stream | Global/Per flow |================= +==== Using Stateless mode to mimic Stateful mode + +Stateless mode can mimic some, but not all functionality of Stateful mode. +For example, you can load a pcap with the number of packets as a link of streams: +a->b->c->d-> back to a +You can then create a program for each stream to change src_ip=10. 0.0.1-10.0.0.254. This creates traffic similar to that of Stateful mode, but with a completely different basis. + +If you are confused you probably need Stateless. === TRex package folders @@ -193,19 +231,23 @@ If you are confused you probably need Stateless. | /automation/trex_control_plane/stl/examples | Stateless Examples |============================= -=== Basic Tutorials +=== Tutorials -This tutorial will walk you through basic but complete TRex Stateless use cases that will show you common concepts as well as slightly more advanced ones. +The tutorials in this section demonstrate basic TRex *stateless* use cases. The examples include common and moderately advanced TRex concepts. ==== Tutorial: Simple IPv4/UDP packet - TRex -*Goal*:: Send a simple UDP packet from all the ports +*Goal*:: + +Send a simple UDP packet from all ports of a TRex server. *Traffic profile*:: -Traffic profile (TP) is a way to define *how* to generate the traffic. 
It defines the traffic templates for the rate, the mode and which fields in the packet to change. The following example defines a profile with one stream. The stream is with IP/UDP packet template with 10 bytes of 'x'(0x78) of payload. to get more example how to define packets using scapy see here link:http://www.secdev.org/projects/scapy/doc/[Scapy] +The following profile defines one stream, with an IP/UDP packet template with 10 bytes of 'x'(0x78) of payload. For more examples of defining packets using Scapy see the link:http://www.secdev.org/projects/scapy/doc/[Scapy documentation]. -*file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] +*File*:: + +link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -233,14 +275,14 @@ class STLS1(object): def register(): <4> return STLS1() ---- -<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x'(0x78) .See more here link:http://www.secdev.org/projects/scapy/doc/[Scapy] -<2> Mode is Continuous with a rate of 1 pps (default rate is 1 PPS) -<3> get_streams function is mandatory -<4> Each Traffic profile module should have a `register` function +<1> Defines the packet. In this case, the packet is IP/UDP with 10 bytes of 'x'. For more information, see: link:http://www.secdev.org/projects/scapy/doc/[Scapy] +<2> Mode: Continuous. Rate: 1 PPS (default rate is 1 PPS) +<3> The `get_streams` function is mandatory +<4> Each traffic profile module requires a `register` function. [NOTE] ===================================================================== -The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change them to be different just add Ether(dst="00:00:dd:dd:00:01") with your destination +The SRC/DST MAC addresses are taken from /etc/trex_cfg.yaml. To change them, add Ether(dst="00:00:dd:dd:00:01") with the desired destination. 
===================================================================== @@ -248,7 +290,7 @@ The SRC/DST MAC addrees are taken from /etc/trex_cfg.yaml. if you want to change [NOTE] ===================================================================== -There is no need to install any python packages (including scapy). The TRex package includes all the packages it requires +The TRex package includes all required packages. It is unnecessary to install any python packages (including Scapy). ===================================================================== [source,bash] @@ -256,14 +298,14 @@ There is no need to install any python packages (including scapy). The TRex pack $sudo ./t-rex-64 -i ---- -* You should wait until the server is up and running. -* You can add `-c` for adding more cores -* You can add `--cfg` for different configuration file +* Wait until the server is up and running. +* (Optional) Use `-c` to add more cores. +* (Optional) Use `--cfg` to specify a different configuration file. The default is link:trex_manual.html#_create_minimum_configuration_file[trex_cfg.yaml]. -*Connect with Console*:: +*Connect with console*:: -From the same machine in a different terminal (either open a new window using `xterm`, or `ssh` again) run the folowing command +On the same machine, in a new terminal window (open a new window using `xterm`, or `ssh` again), connect to TRex using `trex-console`. [source,bash] ---- @@ -293,20 +335,21 @@ Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] # show dynamic statistic >tui ---- -<1> Connect to TRex server assume server at local machine -<2> Start the traffic on all the ports in 10mbps. you can try with 14MPPS -<3> Pause the traffic -<4> Resume -<5> Stop on all the ports +<1> Connects to the TRex server from the local machine. +<2> Start the traffic on all ports at 10 mbps. Can also specify as MPPS. Example: 14 MPPS (`-m 14mpps`). +<3> Pauses the traffic. +<4> Resumes. +<5> Stops traffic on all the ports. 
[NOTE] ===================================================================== -In case you have a connection *error* look into /etc/trex_cfg.yaml -you should *remove* keywords like `enable_zmq_pub : true` and `zmq_pub_port : 4501` from the file. +If you have a connection *error*, open the /etc/trex_cfg.yaml file and remove keywords such as `enable_zmq_pub : true` and `zmq_pub_port : 4501` from the file. ===================================================================== -To look into the streams using `streams -a` +*Viewing streams*:: + +To display stream data for all ports, use `streams -a`. .Streams [source,bash] @@ -338,9 +381,14 @@ Port 3: ---- -to get help on a command run `command --help` +*Viewing command help*:: -to look into general statistics + +To view help for a command, use ` --help`. + +*Viewing general statistics*:: + +To view general statistics, open a "textual user interface" with `tui`. [source,bash] ---- @@ -395,7 +443,7 @@ Port Statistics *Discussion*:: -In this example TRex sends the *same* packet from all the ports. If your setup is connected with loopback you will see Tx packets from port 0 in Rx port 1 and vice versa. If however you are having DUT with static route you might see all the packets going to a specific port. +In this example TRex sends the *same* packet from all ports. If your setup is connected with loopback, you will see Tx packets from port 0 in Rx port 1 and vice versa. If you are having DUT with a static route, you might see all the packets going to a specific port. .Static route [source,bash] @@ -413,9 +461,9 @@ ip route 16.0.0.0 255.0.0.0 1.1.9.2 ip route 48.0.0.0 255.0.0.0 1.1.10.2 ---- -In this example all the packets will be routed to port `TenGigabitEthernet0/1/0` +// this is good info, but it isn't organized into specific tasks or explanations of specific goals. so comes across as useful but somewhat random. 
for example in the Static route example above, we should explain at the beginning that this will route all packets to one port, and that the next example will demonstrate how to route the packets to different ports. -To solve this there is a way to use direction flag in the script +In this example all the packets will be routed to `TenGigabitEthernet0/1/0` port. The following example uses the `direction` flag to change this. *file*:: link:{github_stl_path}/udp_1pkt_simple_bdir.py[stl/udp_1pkt_simple_bdir.py] @@ -448,16 +496,17 @@ To solve this there is a way to use direction flag in the script return [ STLStream( packet = pkt,mode = STLTXCont()) ] ---- -<1> Usage of direction. The packet will be different for each direction +<1> This use of the `direction` flag here causes a different packet to be sent for each direction. ==== Tutorial: Connect from a remote server -*Goal*:: Console connect from a remote machine to TRex server +*Goal*:: Connect by console from a remote machine to a TRex server -*Check that TRex server is up*:: +*Check that TRex server is operational*:: -Make sure TRex server is running, if not run TRex in interactive mode +Ensure that the TRex server is running. If not, then run TRex in interactive mode. +// again, this is a bit vague. the tutorial should provide simple steps for using interactive mode or not. too many conditions. [source,bash] ---- @@ -466,23 +515,23 @@ $sudo ./t-rex-64 -i *Connect with Console*:: -From remote machine you can run this with `-s` flag +From a remote machine, use `trex-console` to connect. Include the `-s` flag, as shown below, to specify the server. [source,bash] ---- $trex-console -s csi-kiwi-02 #<1> ---- -<1> TRex server is csi-kiwi-02 +<1> TRex server is csi-kiwi-02. -if the default python is not 64bit/2.7.x you can change the *PYTHON* environment variable using +The TRex client requires Python versions 2.7.x or 3.4.x. 
To change the Python version, set the *PYTHON* environment variable as follows: -.tcsh +.tcsh shell [source,bash] ---- setenv PYTHON /bin/python #tcsh ---- -.bash +.bash shell [source,bash] ---- extern PYTHON=/bin/mypython #bash @@ -490,21 +539,16 @@ extern PYTHON=/bin/mypython #bash [NOTE] ===================================================================== -Client machine should run Python 2.7 and Python 64bit version. Cisco CEL/ADS is supported. Python 3.0 support in WIP -You should have the same tree of source code in the client side. We are working on a zip file that include only the client python/so files +The client machine should run Python 2.7.x or 3.4.x. Cisco CEL/ADS is supported. The TRex package includes the required link:cp_stl_docs/[client archive]. ===================================================================== -==== Tutorial: Source and Destination MAC address - -*Goal*:: Change source/destination MAC address +==== Tutorial: Source and Destination MAC addresses -Each TRex port has a source and destination MAC (DUT) configured in /etc/trex_cfg.yaml. -The source MAC is not necessarily the hardware MAC address configured in eeprom. -By default those MAC (source and destination) is taken. -In case a user configures a source or destination MAC explicitly this MAC will take precedence. +*Goal*:: Change the source/destination MAC address +Each TRex port has a source and destination MAC (DUT) configured in the /etc/trex_cfg.yaml configuration file. The source MAC is not necessarily the hardware MAC address configured in EEPROM. By default, the hardware-specified MAC addresses (source and destination) are used. If a source or destination MAC address is configured explicitly, that address takes precedence over the hardware-specified default. 
-.MAC addrees +.MAC address [format="csv",cols="2^,2^,2^", options="header",width="100%"] |================= Scapy , Source MAC,Destination MAC @@ -513,9 +557,9 @@ Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg(dst) Ether(dst="00:bb:12:34:56:01"),trex_cfg(src),"00:bb:12:34:56:01" |================= -For example +Example: -*file*:: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] +*File*:: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] [source,python] ---- @@ -525,14 +569,15 @@ For example IP(src="16.0.0.1",dst="48.0.0.1")/ UDP(dport=12,sport=1025) ---- -<1> Don't use TRex port src interface MAC. Instead replace it with 00:bb:12:34:56:01 +<1> Specifying the source interface MAC replaces the default specified in the configuration YAML file. + [IMPORTANT] ===================================== -A TRex port will receive a packet only if the packet has a destination MAC matching the HW Src mac defined for that port in the `/etc/trex_cfg.yaml`. A port can be put into promiscuous mode, allowing receipt of all the packets on the line, by configure it through the API or at the Console with `portattr -a --prom`. +A TRex port will receive a packet only if the packet's destination MAC matches the HW Src MAC defined for that port in the `/etc/trex_cfg.yaml` configuration file. Alternatively, a port can be put into link:https://en.wikipedia.org/wiki/Promiscuous_mode[promiscuous mode], allowing the port to receive all packets on the line. The port can be configured to promiscuous mode by API or by the following command at the console: `portattr -a --prom`. 
===================================== -To show the port mode +To set ports to link:https://en.wikipedia.org/wiki/Promiscuous_mode[promiscuous mode] and show the port status: [source,bash] ---- @@ -545,7 +590,7 @@ Port Status driver | rte_ixgbe_pmd | rte_ixgbe_pmd | maximum | 10 Gb/s | 10 Gb/s | status | IDLE | IDLE | -promiscuous | off | off | #<2> +promiscuous | on | on | #<2> -- | | | HW src mac | 90:e2:ba:36:33:c0 | 90:e2:ba:36:33:c1 | SW src mac | 00:00:00:01:00:00 | 00:00:00:01:00:00 | @@ -554,12 +599,12 @@ SW dst mac | 00:00:00:01:00:00 | 00:00:00:01:00:00 | PCI Address | 0000:03:00.0 | 0000:03:00.1 | NUMA Node | 0 | 0 | ---- -<1> Configure all the ports to be promiscuous -<2> Check port promiscuous mode +<1> Configures all ports to promiscuous mode. +<2> Indicates port promiscuous mode status. -To change the mode via Python API do this: +To change ports to promiscuous mode by Python API: -.Python API to change to promiscuous mode +.Python API to change ports to promiscuous mode [source,python] ---- c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR) @@ -576,28 +621,32 @@ To change the mode via Python API do this: c.set_port_attr(my_ports, promiscuous = True) <2> ---- -<1> Get port info for all the ports -<2> Change port attribute +<1> Get port info for all ports. +<2> Change the port attribute to `promiscuous = True`. -See here for more info link:cp_stl_docs/api/client_code.html[Python Client API] +For more information see the link:cp_stl_docs/api/client_code.html[Python Client API]. [NOTE] ===================================================================== -Interface is not promiscuous mode by default. If you change it to be True, it is better to change it back after your test. +An interface is not set to promiscuous mode by default. Typically, after changing the port to promiscuous mode for a specific test, it is advisable to change it back to non-promiscuous mode. 
===================================================================== ==== Tutorial: Python automation -*Goal*:: Simple automation test using Python from remote or local machine +*Goal*:: Simple automation test using Python from a local or remote machine + +*Directories*:: -Python API examples are located here: `automation/trex_control_plane/stl/examples`. +Python API examples: `automation/trex_control_plane/stl/examples`. -The Python API library is located here: `automation/trex_control_plane/stl/trex_stl_lib`. +Python API library: `automation/trex_control_plane/stl/trex_stl_lib`. -The TRex Console uses the python API library to interact with the TRex server using the JSON-RPC2 protocol over ZMQ. +The TRex console uses the Python API library to interact with the TRex server using the JSON-RPC2 protocol over ZMQ. -*file*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] +image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] + +*File*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] [source,python] @@ -647,7 +696,7 @@ def create_pkt (size, direction): return STLPktBuilder(pkt = base/pad, vm = vm) - <5> + def simple_burst (): # create client @@ -725,26 +774,26 @@ def simple_burst (): # run the tests simple_burst() ---- -<1> Import the stl_path. You should *fix* the path to point to your stl_trex library path. -<2> Import TRex Stateless library. The path should be fixed. -<3> Create packet per direction using Scapy. -<4> This is something more advanced will be explained later. -<5> Connect to local TRex. Username and server can be added. -<6> Acquire the ports. -<7> Load the profile and start the traffic -<8> Wait for the traffic to be finished. There is a polling function so you can test do something while waiting -<9> Get port statistics -<10> Disconnect +<1> Imports the stl_path. The path here is specific to this example. 
When configuring, provide the path to your stl_trex library. +<2> Imports TRex Stateless library. When configuring, provide the path to your TRex Stateless library. +<3> Creates packet per direction using Scapy. +<4> See the Field Engine section for information. +<5> Connects to the local TRex. Username and server can be added. +<6> Acquires the ports. +<7> Loads the traffic profile and starts generating traffic. +<8> Waits for the traffic to be finished. There is a polling function, so you can do other work while waiting. +<9> Gets port statistics. +<10> Disconnects. + +See link:cp_stl_docs/index.html[TRex Stateless Python API] for details about using the Python APIs. ==== Tutorial: HLT Python API -HLT Python API is a layer on top of the native layer. It supports the standard Cisco traffic generator API. -See more in Cisco/IXIA/Spirent documentation. -TRex supported a limited number of HLTAPI arguments and the recommendation is to use the native API due to the flexibility and simplicity. -IXIA for example, has a book of ~2000 pages for specifying all the HLTAPI mode of operations. One of the reasons for the 2000 pages is that in the API there is no clear separation between the definition of the template packet, and the fields that need to be changed and the mode of transmission. This creates a bloat of arguments that need to be documented. +HLT Python API is a layer on top of the native layer. It supports the standard Cisco traffic generator API. For more information, see Cisco/IXIA/Spirent documentation. +TRex supports a limited number of HLTAPI arguments and the recommendation is to use the native API due to the flexibility and simplicity. -The supported classs are: +Supported HLT Python API classes: * Device Control ** connect @@ -759,8 +808,12 @@ The supported classs are: ** traffic_control ** traffic_stats +// IGNORE: This line simply ends the bulletted section so that the next line will be formatted correctly. 
-*file*:: link:{github_stl_examples_path}/hlt_udp_simple.py[hlt_udp_simple.py] +For details, see link:#_hlt_supported_arguments_a_id_altapi_support_a[Appendix] +// confirm link above + +*File*:: link:{github_stl_examples_path}/hlt_udp_simple.py[hlt_udp_simple.py] [source,python] @@ -873,30 +926,27 @@ if __name__ == "__main__": print 'Done' ---- -<1> import Native TRex API -<2> import HLT TRex - - +<1> Imports native TRex API. +<2> Imports HLT API. ==== Tutorial: Simple IPv4/UDP packet - Simulator -*Goal*:: Demonstrates the most basic use case using TRex simulator +*Goal*:: Use the TRex Stateless simulator. +Demonstrates the most basic use case using TRex simulator. -The simulator is a tool called `stl-sim` that is part of the TRex package. -It is a python script that calls an executable. -The executable should run on the same machine that TRex image run (it won't run on an older Linux distributions). +The TRex package includes a simulator tool, `stl-sim`. The simulator operates as a Python script that calls an executable. The platform requirements for the simulator tool are the same as for TRex. -Using the simulator you can : +The TRex simulator can: -* Test your traffic profiles before running it on TRex. -* It can generate the output pcap file -* Simulate number of threads -* Convert from one type of profile to another -* Convert any profile to JSON (API) +* Test your traffic profiles before running them on TRex. +* Generate an output pcap file. +* Simulate a number of threads. +* Convert from one type of profile to another. +* Convert any profile to JSON (API). 
For information, see: link:trex_rpc_server_spec.html#_add_stream[TRex stream specification] -let's take this profile +Example traffic profile: *file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] @@ -926,11 +976,11 @@ class STLS1(object): def register(): <3> return STLS1() ---- -<1> Define the packet, in this case it IP/UDP with 10 bytes of 'x' -<2> Mode is Continuous with rate of 1 PPS (default rate is 1 PPS) -<3> Each Traffic profile module should have a `register` function +<1> Defines the packet - in this case, IP/UDP with 10 bytes of 'x'. +<2> Mode is Continuous, with a rate of 1 PPS. (Default rate: 1 PPS) +<3> Each traffic profile module requires a `register` function. -Now let's try to run it through the TRex simulator while limiting the number of packets to 10 +The following runs the traffic profile through the TRex simulator, limiting the number of packets to 10, and storing the output in a pcap file. [source,bash] ---- $ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 @@ -971,12 +1021,12 @@ $ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 written 10 packets to 'b.pcap' ---- +Contents of the output pcap file produced by the simulator in the previous step: -The following figure presents the output pcap file +image::images/stl_tut_1.png[title="TRex simulator output stored in pcap file",align="left",width={p_width}, link="images/stl_tut_1.png"] -image::images/stl_tut_1.png[title="Wireshark Tutorial 1 output",align="left",width={p_width}, link="images/stl_tut_1.png.png"] +Adding `--json` displays the details of the JSON command for adding a stream: -.To look into the JSON command to the server [source,bash] ---- $./stl-sim -f stl/udp_1pkt_simple.py --json @@ -1036,9 +1086,9 @@ $./stl-sim -f stl/udp_1pkt_simple.py --json ] ---- -For more detailed on Stream definition see RPC specification link:trex_rpc_server_spec.html#_add_stream[here] +For more information about stream definition, see the link:trex_rpc_server_spec.html#_add_stream[RPC specification]. 
-.To convert the profile into YAML format +To convert the profile to YAML format: [source,bash] ---- $./stl-sim -f stl/udp_1pkt_simple.py --yaml @@ -1061,7 +1111,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --yaml split_by_var: '' ---- -To look into the Packet detail try --pkt option (using scapy) +To display packet details, use the `--pkt` option (using Scapy). [source,bash] ---- @@ -1100,7 +1150,7 @@ $./stl-sim -f stl/udp_1pkt_simple.py --pkt 0030 78 78 78 78 xxxx ---- -To convert any profile type to native again use the `--native` option +To convert any profile type to native again, use the `--native` option: .Input YAML format [source,python] ---- $more stl/yaml/imix_1pkt.yaml pps: 100 ---- -.Convert to Native +To convert to native: + [source,bash] ---- $./stl-sim -f stl/yaml/imix_1pkt.yaml --native @@ -1155,13 +1206,13 @@ def register(): The following are the main traffic profile formats. Native is the preferred format. There is a separation between how the traffic is defined and how to control/activate it. The API/Console/GUI can load a traffic profile and start/stop/get a statistic. Due to this separation it is possible to share traffic profiles. -.Traffic profiles formats +.Traffic profile formats [cols="1^,1^,10<", options="header",width="80%"] |================= | Profile Type | Format | Description -| Native | Python | Has the most flexibility. Any format can be converted to native using `stl-sim` using --native option -| HLT | Python | Uses HLT arguments -| YAML | YAML | It is the common denominator traffic profile. We suggest not to use it by human as it is not possible to compose packet using scapy. it is used to move a profile between GUI and Console or API. It can be converted to native using the stl-sim using --native switch +| Native | Python | Most flexible. Any format can be converted to native using the `stl-sim` command with the `--native` option. +| HLT | Python | Uses HLT arguments. 
+| YAML | YAML | The common denominator traffic profile. Information is shared between console, GUI, and simulator in YAML format. This format is difficult to use for defining packets; primarily for machine use. YAML can be converted to native using the `stl-sim` command with the `--native` option. |================= @@ -1169,12 +1220,13 @@ The following are the main traffic profile formats. Native is the preferred form ==== Tutorial: Simple Interleave streams -*Goal*:: Demonstrate interleaving of multiple streams +*Goal*:: Demonstrate interleaving of multiple streams. -The following example demonstrates 3 streams with different rates (pps=10,20,40) and different start time ISG (0,25msec,50msec) +The following example demonstrates 3 streams with different rates (10, 20, 40 PPS) and different start times, based on an inter-stream gap (ISG) of 0, 25 msec, or 50 msec. -*file*:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] +*File*:: link:{github_stl_path}/simple_3pkt.py[stl/simple_3pkt.py] +.Interleaving multiple streams [source,python] ---- def create_stream (self): @@ -1204,33 +1256,37 @@ The following example demonstrates 3 streams with different rates (pps=10,20,40) ) ]).get_streams() ---- -<1> Define template packets using scapy -<2> Define streams with rate of 10 -<3> Define streams with rate of 20 -<4> Define streams with rate of 40 +<1> Defines template packets using Scapy. +<2> Defines streams with rate of 10 PPS. +<3> Defines streams with rate of 20 PPS. +<4> Defines streams with rate of 40 PPS. +// inserted this comment to fix rendering problem - otherwise the next several lines are not rendered +// there's still a problem with the rendering. the image is not displayed. 
-The output:: -The folowing figure present the output +*Output*:: + +image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_inter.png"] -image::images/stl_inter.png[title="Interleave streams",align="left",width={p_width}, link="images/stl_inter.png"] - *Discussion*:: -1. stream #1 schedule a packet each 100msec -2. stream #2 schedule a packet each 50msec -3. stream #3 schedule a packet each 25msec -4. Stream #2 start after 25msec relative to stream #1 -5. Stream #3 start after 50msec relative to stream #1 +* Stream #1 +** Schedules a packet each 100 msec +* Stream #2 +** Schedules a packet each 50 msec +** Starts 25 msec after stream #1 +* Stream #3 +** Schedules a packet each 25 msec +** Starts 50 msec after stream #1 -You can use the simulator to look into the details (pcap file) +You can run the traffic profile in the TRex simulator and view the details in the pcap file containing the simulation output. [source,bash] ---- $./stl-sim -f stl/simple_3pkt.py -o b.pcap -l 200 ---- -or run it from Console on a TRex +To run the traffic profile from console in TRex, use the following command. [source,bash] ---- @@ -1245,7 +1301,7 @@ The following example demonstrates: 1. More than one stream 2. Burst of 10 packets -3. One Stream activates another Stream (self_start=False) +3. 
One stream activating another stream (see `self_start=False` in the traffic profile) *file*:: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] @@ -1265,7 +1321,7 @@ The following example demonstrates: return STLProfile( [ STLStream( isg = 10.0, # star in delay name ='S0', packet = STLPktBuilder(pkt = base_pkt/pad), - mode = STLTXSingleBurst( pps = 10, total_pkts = 10), <1> + mode = STLTXSingleBurst( pps = 10, total_pkts = 10), <1> next = 'S1'), # point to next stream STLStream( self_start = False, # stream is disabled enable trow S0 <2> @@ -1282,29 +1338,29 @@ The following example demonstrates: ]).get_streams() ---- -<1> Stream S0 is with self_start=True, start after 10 sec -<2> S1 with self_start=False. S0 activates it -<3> S2 is activated by S1 +<1> Stream S0 is configured to `self_start=True`, starts after 10 sec. +<2> S1 is configured to `self_start=False`, activated by stream S0. +<3> S2 is activated by S1. -To run the simulator run this command +To run the simulation, use this command. [source,bash] ---- $ ./stl-sim -f stl/stl/burst_3pkt_60pkt.py -o b.pcap ---- -The pcap file should have 60 packets. The first 10 packets have src_ip=16.0.0.1. The next 20 packets has src_ip=16.0.0.2. The next 30 packets has src_ip=16.0.0.3 +The generated pcap file has 60 packets. The first 10 packets have src_ip=16.0.0.1. The next 20 packets have src_ip=16.0.0.2. The next 30 packets have src_ip=16.0.0.3. -This profile can be run from Console using this command +To run the profile from console, use this command. 
[source,bash] ---- TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 ---- -==== Tutorial: Multi Burst mode +==== Tutorial: Multi-burst mode -*Goal* : Learn Multi burst transmit mode +*Goal* : Use Multi-burst transmit mode *file*:: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] @@ -1320,13 +1376,13 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 pad = max(0, size - len(base_pkt)) * 'x' - return STLProfile( [ STLStream( isg = 10.0, # start in delay <1> + return STLProfile( [ STLStream( isg = 10.0, # start in delay <1> name ='S0', packet = STLPktBuilder(pkt = base_pkt/pad), mode = STLTXSingleBurst( pps = 10, total_pkts = 10), next = 'S1'), # point to next stream - STLStream( self_start = False, # stream is disabled. Enabled by S0 <2> + STLStream( self_start = False, # stream is disabled. Enabled by S0 <2> name ='S1', packet = STLPktBuilder(pkt = base_pkt1/pad), mode = STLTXMultiBurst( pps = 1000, @@ -1338,8 +1394,8 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 ]).get_streams() ---- -<1> Stream S0 will wait 10 usec(isg) and then send a burst of 10 packet at 10 PPS rate -<2> Multi burst of 5 bursts of 4 packets with an inter burst gap of one second +<1> Stream S0 waits 10 usec (inter-stream gap, ISG) and then sends a burst of 10 packets at 10 PPS. +<2> Multi-burst of 5 bursts of 4 packets with an inter-burst gap of 1 second. image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width}, link="images/stl_tut_4.png"] @@ -1392,7 +1448,7 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} *Goal* : Demonstrate how to create an IMIX traffic profile. -This profile has 3 streams, each with different size packet. The rate is different for each stream/size. See link:https://en.wikipedia.org/wiki/Internet_Mix[here] +This profile defines 3 streams, with packets of different sizes. The rate is different for each stream/size. 
See the link:https://en.wikipedia.org/wiki/Internet_Mix[Wikipedia article on Internet Mix]. *file*:: link:{github_stl_path}/imix.py[stl/imix.py] @@ -1422,7 +1478,7 @@ This profile has 3 streams, each with different size packet. The rate is differe mode = STLTXCont(pps = pps)) - def get_streams (self, direction = 0, **kwargs): <1> + def get_streams (self, direction = 0, **kwargs): <1> if direction == 0: <2> src = self.ip_range['src'] @@ -1457,26 +1513,32 @@ This profile has 3 streams, each with different size packet. The rate is differe # create imix streams return [self.create_stream(x['size'], x['pps'],x['isg'] , vm) for x in self.imix_table] ---- -<1> Base on the direction, we will construct a diffrent stream (replace src and dest) -<2> Even port id has direction==0 and odd has direction==1 -<3> We didn't explain this yet. This is a Field Engine program to change fields inside the packets +<1> Constructs a different stream for each direction (replaces src and dest). +<2> Even port id has direction==0 and odd has direction==1. +<3> Field Engine program to change fields within the packets. +// we can link "Field Engine" to an appropriate location for more info. ==== Tutorial: Field Engine, Syn attack -The following example demonstrates changing packet fields. -The Field Engine (FE) has limited number of instructions/operation for supporting most use cases. There is a plan to add LuaJIT to be more flexible at the cost of performance. -The FE can allocate stream variables in a Stream context, write a stream variable to a packet offset, change packet size, etc. +The following example demonstrates changing packet fields. The Field Engine (FE) has a limited number of instructions/operations, which support most use cases. +The FE can: +* Allocate stream variables in a stream context +* Write a stream variable to a packet offset +* Change packet size +* and more... +* There is a plan to add LuaJIT to be more flexible at the cost of performance. 
-*Some examples for what can be done:* +*Examples:* -* Change ipv4.tos 1-10 -* Change packet size to be random in the range 64-9K -* Create range of flows (change src_ip, dest_ip, src_port, dest_port) -* Update IPv4 checksum +* Change ipv4.tos value (1 to 10) +* Change packet size to a random value in the range 64 to 9K +* Create a range of flows (change src_ip, dest_ip, src_port, dest_port) +* Update the IPv4 checksum -for more info see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] +For more information, see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] +// add link to Python API: http://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/field_engine.html -The following example demonstrates creating SYN attack from many src to one server. +The following example demonstrates creating a SYN attack from many src addresses to one server. *file*:: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] @@ -1513,21 +1575,21 @@ The following example demonstrates creating SYN attack from many src to one serv vm = vm) return STLStream(packet = pkt, - random_seed = 0x1234,# can be remove. will give the same random value any run + random_seed = 0x1234,# can be removed. will give the same random value any run mode = STLTXCont()) ---- -<1> Create SYN packet using Scapy -<2> Define stream variable name=ip_src, 4 bytes size for IPv4. -<3> Define stream variable name=src_port, 2 bytes size for port. -<4> Write ip_src stream var into `IP.src` packet offset. Scapy calculates the offset. We could gave `IP:1.src" for second IP header in the packet -<5> Fix IPv4 checksum. here we provide the header name `IP` we could gave `IP:1` for second IP -<6> Write src_port stream var into `TCP.sport` packet offset. TCP checksum is not updated here +<1> Creates SYN packet using Scapy . +<2> Defines a stream variable `name=ip_src`, size 4 bytes, for IPv4. +<3> Defines a stream variable `name=src_port`, size 2 bytes, for port. 
+<4> Writes `ip_src` stream var into `IP.src` packet offset. Scapy calculates the offset. Can specify `IP:1.src` for a second IP header in the packet. +<5> Fixes IPv4 checksum. Provides the header name `IP`. Can specify `IP:1` for a second IP. +<6> Writes `src_port` stream var into `TCP.sport` packet offset. TCP checksum is not updated here. -WARNING: Original Scapy does not have the capability to calculate offset for a header/field by name. This offset capability won't work for all the cases because there could be complex cases that Scapy rebuild the header. In such cases put offset as a number +WARNING: Original Scapy cannot calculate offset for a header/field by name. This offset capability will not work for all cases. In some complex cases, Scapy may rebuild the header. In such cases, specify the offset as a number. -The output pcap file field can be seen here +Output pcap file: -.Pcap file output +.Output - pcap file [format="csv",cols="1^,2<,2<", options="header",width="40%"] |================= pkt,Client IPv4,Client Port @@ -1542,8 +1604,8 @@ pkt,Client IPv4,Client Port ==== Tutorial: Field Engine, Tuple Generator -The following example demonstrates creating multiply flows from the same packet template. -The Tuple Generator instructions are used to create two stream variables for IP, port. See link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] +The following example creates multiple flows from the same packet template. The Tuple Generator instructions are used to create two stream variables for IP and port. 
See link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] +// clarify link *file*:: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] @@ -1568,12 +1630,13 @@ The Tuple Generator instructions are used to create two stream variables for IP, pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm) ---- -<1> Define struct with two dependent variables: tuple.ip, tuple.port -<2> Write tuple.ip variable to `IPv4.src` field offset -<3> Write tuple.port variable to `UDP.sport` field offset. You should set UDP.checksum to zero +<1> Defines a struct with two dependent variables: tuple.ip, tuple.port +<2> Writes the tuple.ip variable to `IPv4.src` field offset. +<3> Writes the tuple.port variable to `UDP.sport` field offset. Set UDP.checksum to 0. +// Hanoch: add how to set UDP.checksum to 0 -.Pcap file output +.Output - pcap file [format="csv",cols="1^,2^,1^", options="header",width="40%"] |================= pkt,Client IPv4,Client Port @@ -1585,14 +1648,13 @@ pkt,Client IPv4,Client Port 6 , 16.0.0.2, 1027 |================= -* Number of clients are two. 16.0.0.1 and 16.0.0.2 -* Number of flows is limited to 129020 (2 * (65535-1025)) -* The stream variable size should match the size of the FlowVarWr instruction +* Number of clients: 2: 16.0.0.1 and 16.0.0.2 +* Number of flows is limited to 129020: (2 * (65535-1025)) +* The stream variable size should match the size of the FlowVarWr instruction. ==== Tutorial: Field Engine, write to a bit-field packet -The following example demonstrates a way to write a stream variable to a bit field packet variable. -In this example an MPLS label field will be changed. +The following example writes a stream variable to a bit field packet variable. In this example, an MPLS label field is changed. .MPLS header [cols="32", halign="center",width="50%"] @@ -1630,18 +1692,17 @@ In this example an MPLS label field will be changed. 
mode = STLTXSingleBurst( pps = 1, total_pkts = 100) ) ---- -<1> Define varible size of 2 bytes -<2> Write the stream variable label with a shift of 12 bits and with 20bit MSB mask. Cast the stream variables of 2 bytes to 4 bytes -<3> Second MPLS header should be changed +<1> Defines a variable size of 2 bytes. +<2> Writes the stream variable label with a shift of 12 bits, with a 20-bit MSB mask. Cast the stream variables of 2 bytes to 4 bytes. +<3> Change the second MPLS header. ==== Tutorial: Field Engine, Random packet size -The following example demonstrates a way to to change packet size to be a random size. -The way to do it is: -1. Define template packet with maximum size -2. Trim the packet to the size you want -3. Update the packet fields to the new size +The following example demonstrates varies the packet size randomly, as follows: +1. Defines the template packet with maximum size. +2. Trims the packet to the size you want. +3. Updates the packet fields according to the new size. *file*:: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] @@ -1681,16 +1742,15 @@ The way to do it is: ] ) ---- -<1> Define a random stream variable with maximum size of the packet -<2> Trim the packet size to the fv_rand value -<3> fix ip.len -<4> fix udp.len +<1> Defines a random stream variable with the maximum size of the packet. +<2> Trims the packet size to the fv_rand value. +<3> Fixes ip.len to reflect the packet size. +<4> Fixes udp.len to reflect the packet size. ==== Tutorial: New Scapy header -The following example demonstrates a way to use a header that is not supported by Scapy in default. -In this example we will show VXLAN support. +The following example uses a header that is not supported by Scapy by default. The example demonstrates VXLAN support. 
*file*:: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] @@ -1736,24 +1796,23 @@ class STLS1(object): ---- -<1> Download and and add the scapy header or write it -<2> Use it +<1> Downloads and adds a Scapy header from the specified location. Alternatively, write a Scapy header. +<2> Apply the header. -For more information how to define headers see Scapy link:http://www.secdev.org/projects/scapy/doc/build_dissect.html[here] +For more information how to define headers see link:http://www.secdev.org/projects/scapy/doc/build_dissect.html[Adding new protocols] in the Scapy documentation. -==== Tutorial: Field Engine, Many clients +==== Tutorial: Field Engine, many clients -The following example demonstrates a way to generate traffic from many clients with different IP/MAC to one server. -The following figure shows it. +The following example generates traffic from many clients with different IP/MAC addresses to one server. image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] -1. Send gratuitous ARP from B->D with server IP/MAC (58.55.1.1) -2. DUT learn the ARP of Server IP/MAC (58.55.1.1) -3. Send traffic from A->C with many Clients IP's/MAC's +1. Send gratuitous ARP from B->D with server IP/MAC (58.55.1.1). +2. DUT learns the ARP of server IP/MAC (58.55.1.1). +3. Send traffic from A->C with many client IP/MAC addresses. -Let's take an example: +Example: Base source IPv4 : 55.55.1.1 Destination IPv4: 58.55.1.1 @@ -1762,7 +1821,7 @@ Increment src ipt portion starting at 55.55.1.1 for 'n' number of clients (55.55 Src MAC: start with 0000.dddd.0001, increment mac in steps of 1 Dst MAC: Fixed - 58.55.1.1 -To send gratuitous ARP from TRex server side for this server (58.0.0.1) +The following sends a link:https://wiki.wireshark.org/Gratuitous_ARP[gratuitous ARP] from the TRex server port for this server (58.0.0.1). 
[source,python] ---- @@ -1776,7 +1835,7 @@ To send gratuitous ARP from TRex server side for this server (58.0.0.1) pdst="58.55.1.1") ---- -Then traffic can be sent from client side A->C +Then traffic can be sent from client side: A->C *file*:: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] @@ -1813,19 +1872,17 @@ class STLS1(object): return STLStream(packet = STLPktBuilder(pkt = base_pkt/pad,vm = vm), mode = STLTXCont( pps=10 )) ---- -<1> Write the stream variable mac_src with offset of 10 (last 2 bytes of src_mac field) -<2> Write the stream variable mac_src with `offset_fixup` of 2. beacuse we write it with offset +<1> Writes the stream variable `mac_src` with an offset of 10 (last 2 bytes of `src_mac` field). The offset is specified explicitly as 10 bytes from the beginning of the packet. +<2> Writes the stream variable `mac_src` with an offset determined by the offset of `IP.src` plus the `offset_fixup` of 2. -==== Tutorial: Field Engine, Split to core +==== Tutorial: Field Engine, split to core -The following example demonstrates a way to split generated traffic to a number of threads. -Using this feature, there is a way to specify by which field to split the traffic to threads. -Without this feature the traffic is duplicated and all the threads transmits the same traffic. +The following example splits generated traffic into a number of threads. You can specify the field to use for determining how to split the traffic into threads. Without this feature, the traffic is duplicated and all the threads transmit the same traffic. (See the results tables in the examples below in this tutorial.) *Without Split*:: -Let's assume we have two transmitters DP threads +Scenario: 2 transmitters, DP threads [source,python] ---- @@ -1850,8 +1907,8 @@ Let's assume we have two transmitters DP threads ) ---- -<1> Stream variable -<2> write it to IPv4.src +<1> Stream variable. +<2> Write it to IPv4.src. 
.Variable per thread @@ -1866,12 +1923,12 @@ pkt, thread-0 ip_src,thread-1 ip_src 6 , 16.0.0.6, 16.0.0.6 |================= -* In this case all the threads transmit the same packets +* In the case shown above, all threads transmit the same packets. *With Split feature enabled*:: -Let's assume we have two transmitters DP threads +Scenario: 2 transmitters, DP threads [source,python] ---- @@ -1896,7 +1953,7 @@ Let's assume we have two transmitters DP threads ) ---- -<1> The same example but now we with split by `ip_src` stream variable +<1> Same example as previous, but split by the `ip_src` stream variable. .Variable per thread [format="csv",cols="1^,3^,3^", options="header",width="40%"] @@ -1910,16 +1967,15 @@ pkt, thread-0 ip_src ,thread-1 ip_src 6 , 16.0.0.6, 16.0.0.133 |================= -* In this case the stream variable is split +* In this case the stream variable is split. -To simulate it you can run the following command, let's take the file `stl/udp_1pkt_range_clients_split.py` and simulate it +To simulate this, using the `stl/udp_1pkt_range_clients_split.py` traffic profile, you can run the following command: [source,bash] ---- $./stl-sim -f stl/udp_1pkt_range_clients_split.py -o a.pcap -c 2 -l 10 #<1> ---- -<1> simulate 2 threads -c 2 - +<1> Simulates 2 threads as specified by the `-c 2` option. .Variable per thread [format="csv",cols="1^,3^,3^", options="header",width="40%"] @@ -1933,24 +1989,22 @@ pkt, thread-0 ip_src,thread-1 ip_src 6 , 55.55.0.6 , 55.55.58.158 |================= -*Some rules about Split stream varibles and burst/multi-burst*:: +*Some rules regarding split stream variables and burst/multi-burst*:: -* In case of burst/multi-burst the number of packets are split to number of threads in *default* there is no need an explict split it. -* When the number of packets in a burst is smaller than the number of threads only one thread will do the work. 
-* In case there is a stream with burst of *1* packet, only the first DP thread will do the work. +* When using burst/multi-burst, the number of packets are split to the defualt number of threads specified in the YAML cofiguraiton file, without any need to explicitly split the threads. +* When the number of packets in a burst is smaller than the number of threads, one thread handles the burst. +* In the case of a stream with a burst of *1* packet, only the first DP thread handles the stream. -==== Tutorial: Field Engine, Split to core with Burst +==== Tutorial: Field Engine, Split to core with burst -The following example demonstrates a way to split generated traffic to a number of threads in the case that we are using Burst stream. -In both cases the number of packets would be split into threads. -Using this feature, The Field engine will be split too. +The following example splits generated traffic into a number of threads when using a stream configured to Burst. In contrast to the previous tutorial, this example uses the Burst pattern. As with the previous tutorial, the number of packets is split into multiple threads. In the example in this tutorial, the Field Engine is split also. -*Without Split*:: +*Without split feature enabled*:: In this example: -* Number of threads are two -* Split is not configured +* Number of threads: 2 +* Split: Not configured [source,python] ---- @@ -1984,9 +2038,9 @@ class STLS1(object): mode = STLTXSingleBurst(total_pkts = 20)) <3> ---- -<1> Stream variable -<2> write it to IPv4.src -<3> burst of 20 packets +<1> Stream variable. +<2> Writes it to `IPv4.src`. +<3> Burst of 20 packets. 
.Variable per thread [format="csv",cols="1^,3^,3^", options="header",width="40%"] @@ -2004,17 +2058,16 @@ pkt, thread-0 ip_src,thread-1 ip_src 10 , 16.0.0.10, 16.0.0.10 |================= -*The results*:: +*Results*:: -* Total packets are 20 as expected, 10 generated by each thread -* Field engine is the same for both threads +* Total packets are 20 as expected, 10 generated by each thread. +* Field engine is the same for both threads. -*With Split feature enabled*:: +*With split feature enabled*:: [source,python] ---- -# no split class STLS1(object): """ attack 48.0.0.1 at port 80 """ @@ -2046,8 +2099,8 @@ class STLS1(object): mode = STLTXSingleBurst(total_pkts = 20)) <2> ---- -<1> Split is added by `ip_src` stream variable -<2> burst of 20 packets +<1> Split is added by the `ip_src` stream variable. +<2> Burst of 20 packets. .Variable per thread @@ -2066,28 +2119,31 @@ pkt, thread-0 ip_src,thread-1 ip_src 10 , 16.0.0.10, 17.0.0.137 |================= -*The results*:: +*Results*:: -* Total packets are 20 as expected, 10 generated by each thread +* Total packets are 20 as expected, 10 generated by each thread. * Field engine is *not* the same for both threads. ==== Tutorial: Field Engine, Null stream -The following example demonstrates a way create a Stream with no packets. The use cases is to use the Null stream inter stream gap (ISG) and then go to a new stream. -using this you can create loops like this: +The following example creates a stream with no packets. The example uses the inter-stream gap (ISG) of the Null stream, and then starts a new stream. Essentially, this uses one property of the stream (ISG) without actually including packets in the stream. + +This method can create loops like the following: -image::images/stl_null_stream.png[title="Null Stream",align="left",width={p_width}, link="images/stl_null_stream.png"] +image::images/stl_null_stream.png[title="Null stream",align="left",width={p_width}, link="images/stl_null_stream.png"] -1. 
S1 - send_burst of packets, go to stream NULL -2. NULL - wait ISG time - go to S1 +1. S1 - Sends a burst of packets, then proceed to stream NULL. +2. NULL - Waits the inter-stream gap (ISG) time, then proceed to S1. -Null stream is with configured with +Null stream configuration: -1. mode: burst -2. number of packets: 0 +1. Mode: Burst +2. Number of packets: 0 -==== Tutorial: Field Engine, Barrier stream (Split) - [TODO] +==== Tutorial: Field Engine, Barrier stream (Split) + +*(Future Feature - not yet implemented)* image::images/stl_barrier.png[title="Barrier Stream",align="left",width={p_width}, link="images/stl_barrier.png"] @@ -2097,9 +2153,9 @@ In the above figure we would like to that stream S3 will start on all the thread ==== Tutorial: Pcap file to one stream -*Goal*:: Load stream template packet from pcap file instaed of scapy. +*Goal*:: Load a stream template packet from a pcap file instead of Scapy. -There is an assumption that this pcap has one packet. In case it has more only the first packet is loaded. +Assumption: The pcap file has one packet. If the pcap file has more than one packet, this procedure loads only the first packet. *file*:: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] @@ -2112,7 +2168,7 @@ There is an assumption that this pcap has one packet. In case it has more only t mode = STLTXCont(pps=10)) ] ---- -<1> packet is taken from pcap file relative to pwd of the script you run +<1> Takes the packet from the pcap file, relative to current directory (pwd) in which you are running the script. *file*:: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] @@ -2127,11 +2183,12 @@ There is an assumption that this pcap has one packet. In case it has more only t mode = STLTXCont(pps=10)) ] ---- -<1> packet is taken from pcap file relative to *profile* file location +<1> Takes the packet from the pcap file, relative to directory of the *profile* file location. 
+ -==== Tutorial: Pcap file conversion to many streams +==== Tutorial: pcap file conversion to many streams -*Goal*:: Demonstrates a way to load pcap with *number* of packets and for each packet create a stream with burst of 1. the ISG for each stream is the inter packet gap (IPG) +*Goal*:: Load a pcap file with a *number* of packets, creating a stream with a burst value of 1 for each packet. The inter-stream gap (ISG) for each stream is equal to the inter-packet gap (IPG). *file*:: link:{github_stl_path}/pcap.py[pcap.py] @@ -2145,26 +2202,26 @@ There is an assumption that this pcap has one packet. In case it has more only t ipg_usec = ipg_usec, loop_count = loop_count) ---- -<1> The inter stream gap in usec -<2> How many times to loop -<3> The input pcap file +<1> The inter-stream gap in microseconds. +<2> Loop count. +<3> Input pcap file. image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width={p_width}, link="images/stl_tut_pcap_file1.png"] -This figure illustrates how the streams look like for pcap file with 3 packets. -* Each stream is configured to burst with one packet -* Each stream point to the next stream. -* The last stream point to the first with action_loop=loop_count in case it was asked (>1) -The profile will run on one DP thread because it has burst with one packet (Split can work in this case) +This figure the streams for a pcap file with 3 packets. +* Each stream is configured to Burst mode, with 1 packet +* Each stream triggers the next stream. +* The last stream triggers the first with `action_loop=loop_count` if `loop_count` > 1. +The profile runs on one DP thread because it has a burst with 1 packet. (Split cannot work in this case). 
-Running this example +To run this example: [source,bash] ---- ./stl-sim -f stl/pcap.py --yaml ---- -will give this +The following output appears: [source,python] ---- @@ -2265,15 +2322,14 @@ $./stl-sim -f stl/pcap.py --yaml instructions: [] split_by_var: '' ---- -<1> Each stream point to the next stream -<2> Last point to the first -<3> The number of loop is given in `action_count: 1` -<4> Self_start is disabled for all the streams except the first one +<1> Each stream triggers the next stream. +<2> The last stream triggers the first. +<3> The current loop count is given in: `action_count: 1` +<4> `Self_start` is enabled for the first stream, disabled for all other streams. -==== Tutorial: Pcap file to many streams and Field Engine +==== Tutorial: pcap file to many streams and Field Engine -The following example demonstrates a way to load pcap file to many stream and attach to each stream a Field Engine program. -For example change the IP.src of all the streams to a random number +The following example loads a pcap file to many streams, and attaches a Field Engine program to each stream. For example, the Field Engine can change the `IP.src` of all the streams to a random IP address. *file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] @@ -2329,8 +2385,8 @@ For example change the IP.src of all the streams to a random number return profile.get_streams() ---- -<1> Create Field Engine program, -<2> Apply to all the packets -> convert to streams +<1> Creates Field Engine program. +<2> Applies the Field Engine to all packets -> converts to streams. .Output [format="csv",cols="1^,2^,1^", options="header",width="40%"] @@ -2353,7 +2409,7 @@ pkt, IPv4 , flow ==== Tutorial: Teredo tunnel (IPv6 over IPv4) -The following example demonstrates creating IPv6 packet inside IPv4 packet and create a range of IPs +The following example demonstrates creating an IPv6 packet within an IPv4 packet, and creating a range of IP addresses. 
*file*:: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] @@ -2381,14 +2437,14 @@ The following example demonstrates creating IPv6 packet inside IPv4 packet and c ] ) ---- -<1> Define stream struct name tuple. it has tuple.ip, tuple.port variables -<2> Write stream tuple.ip variable into IPv6.src offset and fixup with 12 bytes (only 4 LSB) -<3> Write stream tuple.port variable into the second UDP header +<1> Defines a stream struct called tuple with the following variables: `tuple.ip`, `tuple.port` +<2> Writes a stream `tuple.ip` variable with an offset determined by the `IPv6.src` offset plus the `offset_fixup` of 12 bytes (only 4 LSB). +<3> Writes a stream `tuple.port` variable into the second UDP header. ==== Tutorial: Mask instruction -The STLVmWrMaskFlowVar is a handy instruction. The pseudocode is as follows: +The STLVmWrMaskFlowVar is single-instruction-multiple-data Field Engine instruction. The pseudocode is as follows: .Pseudocode [source,bash] @@ -2412,6 +2468,8 @@ The STLVmWrMaskFlowVar is a handy instruction. The pseudocode is as follows: *Example 1*:: +This use of STLVmWrMaskFlowVar casts a stream variable with 2 bytes to be 1 byte. + [source,python] ---- vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", @@ -2427,10 +2485,11 @@ The STLVmWrMaskFlowVar is a handy instruction. The pseudocode is as follows: ---- -This will cast stream variable with 2 byte to be 1 byte *Example 2*:: +This use of STLVmWrMaskFlowVar shifts a variable by 8, which effectively multiplies by 256. + [source,python] ---- @@ -2447,7 +2506,6 @@ This will cast stream variable with 2 byte to be 1 byte ) ---- -The output will be shift by 8 .Output [format="csv",cols="1^", options="header",width="20%"] @@ -2460,6 +2518,8 @@ The output will be shift by 8 *Example 3*:: +This use of STLVmWrMaskFlowVar instruction to generate the values shown in the table below as offset values for `pkt_offset`. 
+ [source,python] ---- vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", @@ -2476,7 +2536,7 @@ The output will be shift by 8 ) ---- -<1> take var mac_src>>1 and write the LSB every two packet there should be a change +<1> Divides the value of `mac_src` by 2, and writes the LSB. For every two packets, the value written is changed. .Output [format="csv",cols="1^", options="header",width="20%"] @@ -2492,35 +2552,41 @@ value 0x01 |================= -==== Tutorial: Advance traffic profile +==== Tutorial: Advanced traffic profile + +*Goal*:: -As said above, every traffic profile must define the following function: +* Define a different profile to operate in each traffic direction. +* Define a different profile for each port. +* Tune a profile tune by the arguments of tunables. + +Every traffic profile must define the following function: [source,python] ---- def get_streams (self, direction = 0, **kwargs) ---- -'direction' is a mandatory field that will always be provided for any profile -being loaded. +`direction` is a mandatory field, required for any profile being loaded. -Besides that, a profile can be provided with any key-value pairs which can be -used to customize this profile - we call these 'tunables'. +A profile can be given any key-value pairs which can be used to customize this profile. These are called "tunables". -It is up to the profile to define which tunables it can accept and customize -the output based on them. +The profile defines which tunables can be input to customize output. -[NOTE] -===================================================================== -All paramteres must be provided with default values. A profile must be loadable with no paramters. -**kwargs contains all the automatically provided values which are not -tunables. -Every tuanble must be expressed as key-value pair with default value. 
-===================================================================== +*Usage notes for defining parameters*:: + +* All parameters require default values. +* A profile must be loadable with no parameters specified. +* **kwargs (see Python documentation for information about keyworded arguments) contain all of the automatically provided values which are not tunables. +* Every tuanble must be expressed as key-value pair with default value. + + +For example, for the profile below, 'pcap_with_vm.py': +* The profile receives 'direction' as a tunable and mandatory field. +* The profile defines 4 additional tunables. +* Automatic values such as 'port_id' which are not tunables will be provided on kwargs. -For example, -let's take a look at a profile called 'pcap_with_vm.py' *file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] @@ -2535,18 +2601,10 @@ def get_streams (self, **kwargs) ---- -This profile gets 'direction' as a tunable and mandatory field. -Define 4 more tunables which the profile decided about, -And automatic values such as 'port_id' which are not tunables will be provided on kwargs. - *Direction*:: -Direction is a tunable that will always be provided by the API/console when loading -a profile, but it can be overriden by the user. -It is used to make the traffic profile more usable such as bi-directional profile. -However, a profile is free to ignore this parameter. +`direction` is a tunable that is always provided by the API/console when loading a profile, but it can be overridden by the user. It is used to make the traffic profile more usable - for example, as a bi-directional profile. However, the profile can ignore this parameter. -As default 'direction' will be equal to port_id % 2, so the *even* ports will be -provided with ''0'' and the *odd* ones with ''1''. +By default, `direction` is equal to port_id % 2, so *even* numbered ports are provided with ''0'' and the *odd* numbered ports with ''1''. 
[source,python] ---- @@ -2567,28 +2625,27 @@ def get_streams (self, direction = 0,**kwargs): ), ] ---- -<1> Different rate base on direction +<1> Specifies different rates (100 and 200) based on direction. [source,bash] ---- $start -f ex1.py -a ---- -If you have 4 interfaces +For 4 interfaces: -interfaces 0/2 is direction 0 -interfaces 1/3 is direction 1 +* Interfaces 0 and 2: direction 0 +* Interfaces 1 and 3: direction 1 -So rate will be changed accordingly. +The rate changes accordingly. *Customzing Profiles Using ''port_id''*:: -**kwargs provide default values that are passed along to the profile. -such a value is 'port_id' - which is the port ID for the profile. +Keyworded arguments (**kwargs) provide default values that are passed along to the profile. + +In the following, 'port_id' (port ID for the profile) is a **kwarg. Using port_id, you can define a complex profile based on different ID of ports, providing a different profile for each port. + -Using that you can define one can define a complex profile based -on different ID of ports. - [source,python] ---- @@ -2644,7 +2701,7 @@ def create_streams (self, direction = 0, **args): *Full example using the TRex Console*:: -Let's take the previous pcap_with_vm.py and examine it with the console: +The following command displays information about tunables for the pcap_with_vm.py traffic profile. [source,bash] ---- @@ -2669,8 +2726,7 @@ Tunables: ['direction = 0', 'ip_src_range = None', 'loop_count = 5', 'ip trex> ---- -So we can provide tunables on all those fields. -Let's change some: +One can provide tunables on all those fields. 
The following command changes some: [source,bash] ---- @@ -2698,9 +2754,11 @@ Starting traffic on port(s) [0, 1, 2, 3]: [SUCCESS] trex> ---- + +The following command customizes these to different ports: + [source,bash] ---- -We can also customize these to different ports: trex>start -f stl/pcap_with_vm.py --port 0 1 -t ipg_usec=15.0,loop_count=25#ipg_usec=100,loop_count=300 @@ -2720,30 +2778,30 @@ Starting traffic on port(s) [0, 1]: [SUCCESS] trex> ---- -==== Tutorial: Per stream statistics -* Per stream statistics is implemented using hardware assist when possible (X710/XL710 Intel NICs flow director rules for example). -* With other NICs (Intel I350, 82599) it is implemented in software. -* Implementation works as follows: -** User chooses 32 bit packet group id (pg_id). -** IPv4 Identification field of the stream is changed to a value with in a reserved range (0xff00 to 0xffff). Notice that if a stream for which no statistics is needed has IPv4 Identification in the reserved range, it is changed (left bit becomes 0). +==== Tutorial: Per stream statistics -* In the software implementation, hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. In the hardware implementation, HW rules are inserted to count packets from relevant streams. -* Summed up statistics (per stream, per port) are sent using ZMQ async channel to clients. +* Per stream statistics are implemented using hardware assist when possible (examples: Intel X710/XL710 NIC flow director rules). +* With other NICs (examples: Intel I350, 82599), per stream statistics are implemented in software. +* Implementation: +** User chooses 32-bit packet group ID (pg_id). +** The IPv4 identification field of the stream is changed to a value within a reserved range (0xff00 to 0xffff). Note that if a stream for which no statistics are needed has an IPv4 Identification in the reserved range, it is changed (the left bit becomes 0). 
+** Software implementation: Hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. +** Hardware implementation: Hardware rules are inserted to count packets from relevant streams. +* Summed up statistics (per stream, per port) are sent using a link:http://zguide.zeromq.org/[ZMQ] async channel to clients. *Limitations*:: -* Currently, the feature supports only two packet types: -** IPv4 over ethernet -** IPv4 with one vlan tag -* Number of concurrent streams you can get statistics for is 128. +* The feature supports 2 packet types: +** IPv4 over Ethernet +** IPv4 with one VLAN tag +* Maximum number of concurrent streams on which statistics may be collected: 128 -We'll demonstrate this with two examples, one that uses the console and one that uses the Python API. +Two examples follow, one using the console and the other using the Python API. *Console*:: -In order to use the console, we'll take a simple profile which defines -two streams and configure them with two different PG IDs. +The following simple traffic profile defines 2 streams and configures them with 2 different PG IDs. 
*file*:: link:{github_stl_path}/flow_stats.py[stl/flow_stats.py] @@ -2764,12 +2822,12 @@ class STLS1(object): ---- -<1> assigned to PG ID 7 -<2> assigned to PG ID 12 +<1> Assigned to PG ID 7 +<2> Assigned to PG ID 12 -Now we will inject this to the console and use the TUI to see what's going on: +The following command injects this to the console and uses the textual user interface (TUI) to display the TRex activity: -[source,python] +[source,bash] ---- trex>start -f stl/flow_stats.py --port 0 @@ -2789,32 +2847,32 @@ Streams Statistics PG ID | 12 | 7 -------------------------------------------------- - Tx pps | 5.00 Kpps | 999.29 pps <1> + Tx pps | 5.00 Kpps | 999.29 pps #<1> Tx bps L2 | 23.60 Mbps | 479.66 Kbps Tx bps L1 | 24.40 Mbps | 639.55 Kbps --- | | - Rx pps | 5.00 Kpps | 999.29 pps <2> - Rx bps | N/A | N/A <3> + Rx pps | 5.00 Kpps | 999.29 pps #<2> + Rx bps | N/A | N/A #<3> ---- | | opackets | 222496 | 44500 ipackets | 222496 | 44500 obytes | 131272640 | 2670000 - ibytes | N/A | N/A <3> + ibytes | N/A | N/A #<3> ----- | | tx_pkts | 222.50 Kpkts | 44.50 Kpkts rx_pkts | 222.50 Kpkts | 44.50 Kpkts tx_bytes | 131.27 MB | 2.67 MB - rx_bytes | N/A | N/A <3> + rx_bytes | N/A | N/A #<3> ---- -<1> TX bandwidth of the streams matches the configured values -<2> RX bandwidth means that no drops were seen -<3> RX BPS is not supported on this platform (no hardware support for BPS) hence the N/A. +<1> Tx bandwidth of the streams matches the configured values. +<2> Rx bandwidth (999.29 pps) matches the Tx bandwidth (999.29 pps), indicating that there were no drops. +<3> RX BPS is not supported on this platform (no hardware support for BPS), so TRex displays N/A. 
*Flow Stats Using The Python API*:: -We'll use the following example: +The Python API example uses the following traffic profile: [source,python] ---- @@ -2855,18 +2913,14 @@ def rx_example (tx_port, rx_port, burst_size): rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0) <2> ---- -<1> define the stream to use PG ID 5 -<2> the structure of the object ''flow_stats'' is described below - -==== flow_stats object structure -A dictionary which keys are the configured PG IDs. - -The next level is a dictionary which contains 'tx_pkts', 'tx_bytes' and 'rx_pkts'. +<1> Configures the stream to use PG ID 5. +<2> The structure of the object ''flow_stats'' is described below. -Each one of those keys contain a dictionary of per port values. +==== Tutorial: flow_stats object structure +The flow_stats object is a dictionary whose keys are the configured PG IDs. The next level is a dictionary that contains 'tx_pkts', 'tx_bytes', and 'rx_pkts'. Each of these keys contains a dictionary of per port values. -Here is a printout of flow_stats object for 3 PG IDs after a specific run: +The following shows a flow_stats object for 3 PG IDs after a specific run: [source,bash] ---- @@ -2886,20 +2940,27 @@ Here is a printout of flow_stats object for 3 PG IDs after a specific run: ---- ==== TODO + +// note TODO + * TUI should show Tx/Rx stats [TODO] * Python API to get the info [TODO] ==== Tutorial: Per stream latency/Jitter [TODO] +*(Future Feature - not yet implemented)* +// note TODO ==== Tutorial: HLT traffic profile -traffic_config API has set of arguments for specifying stream. In particular the packet template and which field and how to send it. -It is possible to define a traffic profile using HTTAPI arguments . -Under the hood there is a compiler that converts it to native scapy/field engine instruction -The support is limited, see xref:altapi-support[here]. 
+The traffic_config API has set of arguments for specifying streams - in particular, the packet template, which field, and how to send it. +// clarify "which field" +It is possible to define a traffic profile using HTTAPI arguments. +// clarify names: "HLT traffic profile", "traffic_config API", "HTTAP" +The API creates native Scapy/Field Engine instructions. +For limitations see xref:altapi-support[here]. *file*:: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] @@ -2941,21 +3002,26 @@ class STLS1(object): return self.create_streams() ---- -This profile can be run with the simulator to generate pcap file +The following command, within a bash window, runs the traffic profile with the simulator to generate pcap file. [source,bash] ---- $ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py -o b.pcap -l 10 ---- -It can be converted to native json or YAML +The following commands, within a bash window, convert to native JSON or YAML. [source,bash] ---- -$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --josn +$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --json ---- -or converted to native Python profile you can use this command +[source,bash] +---- +$ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --yaml +---- + +Alternatively, use the following command to convert to a native Python profile. [source,bash] ---- @@ -3011,35 +3077,37 @@ def register(): return STLS1() ---- - -to run it using using the TRex Console +Use the following command within the TRex console to run the profile. [source,bash] ---- TRex>start -f stl/hlt/hlt_udp_inc_dec_len_9k.py -m 10mbps -a ---- -more profiles and example can be found in `stl/hlt` folder - === Reference -Have a look link:cp_stl_docs/index.html[Python Client API] +Additional profiles and examples are available in the `stl/hlt` folder. + +For information about the Python client API, see the link:cp_stl_docs/index.html[Python Client API documentation]. 
=== Console commands ==== Overview -The console will use TRex Client API for controling TRex -Some guidelines: +The console uses the TRex client API to control TRex. + +*Important information about use of the console*:: -* Console should not save it own state, it should only cache server state. It assumed there is only one console that has R/W capability so once connected as R/W console (per user/interface) it could read the server state and then cache all the operations. -* There could be many read-only clients for the same user same interface. -* Console should sync with server to get the state in connection stage and cache the server information locally -* In case of crash/exit of the Console it should sync again at startup -* Commands will be like bash shell - no order args, many flags -* Ability to show stats in real time. Gives the option to open two Console one for statistics and one for commands ( many read-only clients) +// it seems that all of these provide background info, not guidelines for use. the use of "should" is unclear. -==== Ports State +* The console does not save its own state. It caches the server state. It is assumed that there is only one console that has R/W capability, so once connected as R/W console (per user/interface), it can read the server state and then cache all operations. +* There may be many read-only clients for the same user interface. +* The console syncs with the server to get the state during the connection stage, and caches the server information locally. +* In case of crash or exit of the console, it syncs again at startup. +* Commands are similar to bash shell commands - no order args, many flags. +* The console can display TRex stats in real time. You can open two consoles simultaneously - one for commands and one for displaying statistics. 
+ +==== Ports State [options="header",cols="^1,3a"] |================= @@ -3064,12 +3132,13 @@ Some guidelines: ==== Common Arguments -This section includes arguments that are common to many commands -In the command they will be marked like this (arg name) +The command descriptions include arguments common to many commands, designated as: (arg name) ==== Port mask -this gives the ability to choose batch of ports +The port mask enbales selecting a range or set of ports. + +*Example*:: [source,bash] ---- @@ -3084,7 +3153,9 @@ $command [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] ==== Duration -duration in second or in min or hours +Duration is expressed in seconds, minutes, or hours. + +*Example*:: [source,bash] ---- @@ -3099,6 +3170,10 @@ $command[-d 100] [-d 10m] [-d 1h] ==== Multiplier +The traffic profile defines the bandwidth for each stream. The multiplier function normalizes the bandwidth of streams to a specified bandwidth, PPS, or port percentage. + +*Example*:: + [source,bash] ---- $command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] @@ -3106,7 +3181,7 @@ $command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] multiplier : -m 100 : multiply stream file by this factor - -m 10gb : from graph calculate the maximum rate as this bandwidth for all streams( for each port ) + -m 10gbps : from graph calculate the maximum rate as this bandwidth for all streams( for each port ) -m 10kpps : from graph calculate the maximum rate as this pps for all streams ( for each port ) -m 40% : from graph calculate the maximum rate as this precent from total port ( for each port ) ---- @@ -3114,7 +3189,14 @@ $command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] ==== Commands -===== Connect +===== connect + +* Attempts to connect to server +* Sends a ping command +* Syncs with the port info and stream info state +* Reads all counter stats for reference + +*Example*:: [source,bash] ---- @@ -3128,29 +3210,27 @@ $trex-con [--ip $IP] [--server $IP] [--rpc-port $PORT] [--async_port port] --ip 
or --server :default 127.0.0.1 the TRex server ip ---- -This command -* try to connect to server -* send ping command -* sync with all the ports info / streams info state -* read all counters stats for reference - ===== reset -Reset the server and client to a known state - should not be used in a normal scenario +Resets the server and client to a known state. Not used in normal scenarios. + +- Forces acquire on all ports +- Stops all traffic on all ports +- Removes all streams from all ports + +*Example*:: [source,bash] ---- $reset ---- -- force acuire all the ports -- Stop all traffic on all the ports -- Remove all the streams from all the ports - ===== port -Configure port state, autoneg, rate etc +Configures port state, autoneg, rate, and so on. + +*Example*:: [source,bash] ---- @@ -3163,7 +3243,9 @@ $port (port mask) --cfg "auto/10/" ===== clear -Clear all port stats counters +Clears all port stats counters. + +*Example*:: [source,bash] ---- @@ -3173,7 +3255,9 @@ $clear (port mask) ===== stats -Shows global and port statistic +Shows global and port statistics. + +*Example*:: [source,bash] ---- @@ -3188,8 +3272,10 @@ $stats (port mask) [-g] [-p] [-ps] ===== streams -Shows the configured streams on each port/ports -Should show from client cache +Shows the configured streams on each port, from the client cache. +// clarify "should" + +*Example*:: [source,bash] ---- @@ -3202,7 +3288,7 @@ $streams (port mask) [--streams mask] [-f] [--full] [--graph] ---- -example +*Example*:: [source,bash] ---- @@ -3226,7 +3312,9 @@ port 1 : imix/a.yaml ---- -show only port 1 and 2 +*Example*:: + +Use this command to show only ports 1 and 2. [source,bash] ---- @@ -3236,26 +3324,28 @@ $streams --port 1 2 .. ---- +*Example*:: + +Use this command to show full information for stream 0 and port 0, output in JSON format. 
+ [source,bash] ---- $streams --port 0 --streams 0 -f - - show the full info on stream 0 and port 0, print in JSON format - ---- ===== start -* work on a set of ports -* remove all streams -* load new streams -* start traffic with specific multiplier -* limit the traffic to a specific duration -* port state should be stopped, in case of --force stop the port -* in case one of the port is not stop don't start any port -* all ports should be in state IDLE or STREAMS +* Operates on a set of ports +* Removes all streams +* Loads new streams +* Starts traffic with a specific multiplier +* Limits the traffic to a specific duration +* Acts only on ports in "stopped: mode. Using `--force` first stops the port(s). +* Note: If any ports are not in "stopped" mode, the command fails. + +*Example*:: [source,bash] ---- @@ -3271,104 +3361,91 @@ $start [--force] (port mask) [-f stl/imix.py] [-db ab] (duration) (multiplier) ---- -examples +*Example*:: +Use this command to start the profile on all all ports, with a maximum bandwidth of 10 GB. [source,bash] ---- $start -a -f stl/imix.py -m 10gb ---- -start this profile on all all ports maximum bandwidth is 10gb +*Example*:: + +Use this command to start the profile on ports 1 and 2, multiplies the bandwidth specified in the traffic profile by 100. [source,bash] ---- $start -port 1 2 -f stl/imix.py -m 100 ---- -start this profile on port 1,2 multiply by 100 -[NOTE] -===================================== - in case of start command without args, try to remember the last args given and reprint them -===================================== - ===== stop -* work on a set of ports -* change the mode of the port to stopped -* do not remove the streams -* in case port state is already stopped don't do anything -* all ports should be in state WORK +* Operates on a set of ports +* Changes the mode of the port to "stopped" +* Does not remove streams + +*Example*:: +Use this command to stop the specified ports. + +See the port mask description. 
[source,bash] ---- $stop (port mask) - See ports command explanation from the start - ---- ===== pause -* work on a set of ports -* move a wokring set of ports to a state of pause -* all ports should be in state WORK +* Operates on a set of ports +* Changes a working set of ports to a "pause" state +*Example*:: +See the port mask description. [source,bash] ---- $pause (port mask) - see ports command explanation from start - ---- ===== resume -* work on a set of ports -* move a wokring set of port to a state of resume -* all ports should be in state PAUSE +* Operates on a set of ports +* Changes a working set of port to a "resume" state +* All ports should be in "paused" status. If any of the ports are not paused, the command fails. +*Example*:: +See the port mask description. [source,bash] ---- $resume (port mask) - see ports command explanation from start - ---- -===== restart - -* restart the work on the loaded streams -* same as start without the -f /--db switch -* all ports should be in state STREAMS - -[source,bash] ----- -$restart (port mask) (duration) (multiplier) +===== update - see ports command explanation from start +Update the bandwidth multiplier for a mask of ports. ----- +* All ports must be in "work" state. If any ports are not in "work" state, the command fails. -===== update - -* all ports should be in state WORK +*Example*:: +See the descriptions for port mask and multiplier. [source,bash] ---- >update (port mask) (multiplier) ---- -Update the bandwidth multiplier for a mask of ports [NOTE] @@ -3377,24 +3454,26 @@ Update the bandwidth multiplier for a mask of ports ===================================== -===== tui +===== TUI + +The textual user interface (TUI) displays constantly updated TRex states in a textual window, similar to the Linux "top" tool. 
-shows the stats in a textual window (like top) +*Example*:: [source,bash] ---- $tui ---- -enter to a mode of Stats and present 3 type of windows -* global/port stats/version/connected etc -* per port -* per port streams info +Enters a Stats mode and displays three types of TRex statistics: +* Global/port stats/version/connected etc +* Per port +* Per port stream -get keyboard - q - quit the gui window - c - clear all counters +The followig keyboard commands operate in the TUI window: + q - Quit the TUI window + c - Clear all counters === Appendix -- cgit 1.2.3-korg From 6719dcbe9bd291c631ebaa40ae41ebef523569b0 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 28 Mar 2016 17:31:17 +0300 Subject: minor --- draft_trex_stateless_moved1.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/draft_trex_stateless_moved1.asciidoc b/draft_trex_stateless_moved1.asciidoc index 761d2b18..8cda35e5 100644 --- a/draft_trex_stateless_moved1.asciidoc +++ b/draft_trex_stateless_moved1.asciidoc @@ -23,7 +23,6 @@ endif::backend-xhtml11[] include::trex_ga.asciidoc[] - moved to link:draft_trex_stateless.html[here] -- cgit 1.2.3-korg From 73961470ca62926935cceb56e90dfc094449e326 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Wed, 30 Mar 2016 12:35:36 +0300 Subject: David edit 2 -edits to Stateless doc --- trex_stateless.asciidoc | 133 +++++++++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 58 deletions(-) mode change 100644 => 100755 trex_stateless.asciidoc diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc old mode 100644 new mode 100755 index ebcdff36..215e2283 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -2,7 +2,7 @@ TRex Stateless support ====================== :author: TRex team :email: trex.tgen@gmail.com -:revnumber: 1.95 +:revnumber: 1.96 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex @@ -30,7 +30,7 @@ For information, see the link:trex_manual.html[manual], especially the material == 
Stateless support (Alpha stage) -=== High level functionality +=== High level functionality // maybe Feature overview * Large scale - Supports a line rate of 14 million packets per second (mpps) per core, scalable with the number of cores @@ -57,14 +57,14 @@ For information, see the link:trex_manual.html[manual], especially the material * Blazingly fast automation support ** Python 2.7/3.0 Client API ** Python HLTAPI Client API -* Multi-user support - multiple users can interact with the same TRex instance simultaneously // added "instance" +* Multi-user support - multiple users can interact with the same TRex instance simultaneously ==== Traffic profile example -// Need explanation of example in figure. +The following example shows three streams configured for Continuous, Burst, and Multi-burst traffic. -image::images/stl_streams_example.png[title="Streams example",align="left",width={p_width}, link="images/stl_streams_example.png"] +image::images/stl_streams_example.png[title="Stream example",align="left",width={p_width}, link="images/stl_streams_example.png"] ==== High level functionality - near future @@ -93,7 +93,7 @@ TRex has limited functionality compared to IXIA, but has some advantages. The fo | Tx Mode | Continuous/Burst/Multi-burst | Continuous/Burst/Multi-burst| | ARP Emulation | Yes | Not yet - workaround | | Automation | TCL/Python wrapper to TCL | [green]*native Python/Scapy* | -| Automation speed sec| 30 sec | [green]*1 msec* | test of load/start/stop/get counters +| Automation speed sec| 30 sec | [green]*1 msec* | Test of load/start/stop/get counters | HLTAPI | Full support. 2000 pages of documentation | Limited. 20 pages of documentation| | Per Stream statistics | 255 streams with 4 global masks | 128 rules for XL710/X710 hardware and software impl for 82599/I350/X550| Some packet type restrictions apply to XL710/X710. 
| Latency Jitter | Yes | Yes | @@ -129,7 +129,7 @@ image::images/trex_2_stateless.png[title="RPC Server Components",align="left",wi *Interfaces*:: * Automation API: Python is the first client to implement the Python automation API. * User interface: The console uses the Python API to implement a user interface for TRex. -* GUI : The GUI works on top JSON-RPC2 layer +* GUI : The GUI works on top of the JSON-RPC2 layer. *Control of TRex interfaces*:: * Numerous users can control a single TRex server together, from different interfaces. @@ -139,9 +139,8 @@ image::images/trex_2_stateless.png[title="RPC Server Components",align="left",wi * A client in read-write mode can acquire a statistic in real time (with ASYNC ZMQ). This enables viewing statistics through numerous user interfaces (console and GUI) simultaneously. *Synchronization*:: -* A client should sync with the TRex server to get the state in connection time, and cache the server information locally once the state was changed. // Not clear; avoid "should". -* If a client crashes or exits, it should sync again after reconnecting. -// Avoid "should". Meaning will be more clear without "should". +* A client syncs with the TRex server to get the state in connection time, and caches the server information locally after the state has changed. +* If a client crashes or exits, it syncs again after reconnecting. image::images/trex_stateless_multi_user.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] @@ -163,7 +162,7 @@ image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_w * *TRex*: Each TRex instance supports numerous interfaces. // "one or more"? -* *Interface*: Each interface supports one or more traffic profiles (TP). +* *Interface*: Each interface supports one or more traffic profiles. * *Traffic profile*: Each traffic profile supports one or more streams. 
* *Stream*: Each stream includes: ** *Packet*: Packet template up to 9 KB @@ -173,7 +172,7 @@ image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_w ** *Mode*: Specifies how to send packets: Continuous/Burst/Multi-burst ** *Rx Stats*: Statistics to collect for each stream ** *Rate*: Rate (packets per second or bandwidth) -** *Action*: Specifies stream to follow when the current stream is complete. (valid for Continuous or Burst modes) +** *Action*: Specifies stream to follow when the current stream is complete (valid for Continuous or Burst modes). === Stateful vs Stateless @@ -181,7 +180,7 @@ image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_w TRex Stateless support enables basic L2/L3 testing, relevant mostly for a switch or router. Using Statelss mode, it is possible to define a stream with a *one* packet template, define a program to change any fields in the packet, and run the stream in continuous, burst, or multi-burst mode. With Stateless, you *cannot* learn NAT translation; there is no context of flow/client/server. -* In Stateful mode, the basic building block is a flow/application (composed from many packets). +* In Stateful mode, the basic building block is a flow/application (composed of many packets). * Stateless mode is much more flexible, enabling you to define any type of packet, and build a simple program. .Stateful vs Stateless features @@ -203,7 +202,7 @@ For example, you can load a pcap with the number of packets as a link of streams a->b->c->d-> back to a You can then create a program for each stream to change src_ip=10. 0.0.1-10.0.0.254. This creates traffic similar to that of Stateful mode, but with a completely different basis. -If you are confused you probably need Stateless. +If you are confused you probably need Stateless. :-) === TRex package folders @@ -272,7 +271,7 @@ class STLS1(object): def register(): <4> return STLS1() ---- -<1> Defines the packet. 
In this case, the packet is IP/UDP with 10 bytes of 'x'. For more information, see: link:http://www.secdev.org/projects/scapy/doc/[Scapy] +<1> Defines the packet. In this case, the packet is IP/UDP with 10 bytes of 'x'. For more information, see the link:http://www.secdev.org/projects/scapy/doc/[Scapy documentation]. <2> Mode: Continuous. Rate: 1 PPS (default rate is 1 PPS) <3> The `get_streams` function is mandatory <4> Each traffic profile module requires a `register` function. @@ -299,6 +298,7 @@ $sudo ./t-rex-64 -i * (Optional) Use `-c` to add more cores. * (Optional) Use `--cfg` to specify a different configuration file. The default is link:trex_manual.html#_create_minimum_configuration_file[trex_cfg.yaml]. +// IGNORE: this line helps rendering of next line *Connect with console*:: @@ -462,7 +462,7 @@ ip route 48.0.0.0 255.0.0.0 1.1.10.2 In this example all the packets will be routed to `TenGigabitEthernet0/1/0` port. The following example uses the `direction` flag to change this. -*file*:: link:{github_stl_path}/udp_1pkt_simple_bdir.py[stl/udp_1pkt_simple_bdir.py] +*File*:: link:{github_stl_path}/udp_1pkt_simple_bdir.py[stl/udp_1pkt_simple_bdir.py] [source,python] ---- @@ -554,7 +554,6 @@ Ether(src="00:bb:12:34:56:01"),"00:bb:12:34:56:01",trex_cfg(dst) Ether(dst="00:bb:12:34:56:01"),trex_cfg(src),"00:bb:12:34:56:01" |================= -Example: *File*:: link:{github_stl_path}/udp_1pkt_1mac_override.py[stl/udp_1pkt_1mac_override.py] @@ -923,8 +922,8 @@ if __name__ == "__main__": print 'Done' ---- -<1> Imports native TRex API. -<2> Imports HLT API. +<1> Imports the native TRex API. +<2> Imports the HLT API. 
==== Tutorial: Simple IPv4/UDP packet - Simulator @@ -945,7 +944,7 @@ The TRex simulator can: Example traffic profile: -*file*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] +*File*:: link:{github_stl_path}/udp_1pkt_simple.py[stl/udp_1pkt_simple.py] [source,python] ---- @@ -1215,7 +1214,7 @@ The following are the main traffic profile formats. Native is the preferred form === Traffic profile Tutorials -==== Tutorial: Simple Interleave streams +==== Tutorial: Simple Interleaving streams *Goal*:: Demonstrate interleaving of multiple streams. @@ -1302,7 +1301,7 @@ The following example demonstrates: 2. Burst of 10 packets 3. One stream activating another stream (see `self_start=False` in the traffic profile) -*file*:: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] +*File*:: link:{github_stl_path}/burst_3pkt_60pkt.py[stl/burst_3pkt_60pkt.py] [source,python] @@ -1361,7 +1360,7 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 *Goal* : Use Multi-burst transmit mode -*file*:: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] +*File*:: link:{github_stl_path}/multi_burst_2st_1000pkt.py[stl/multi_burst_2st_1000pkt.py] [source,python] ---- @@ -1404,7 +1403,7 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} *Goal* : Demonstrate a limited loop of streams -*file*:: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_times.py] +*File*:: link:{github_stl_path}/burst_3st_loop_x_times.py[stl/burst_3st_loop_x_times.py] [source,python] ---- @@ -1449,7 +1448,7 @@ image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width} This profile defines 3 streams, with packets of different sizes. The rate is different for each stream/size. See the link:https://en.wikipedia.org/wiki/Internet_Mix[Wikipedia article on Internet Mix]. 
-*file*:: link:{github_stl_path}/imix.py[stl/imix.py] +*File*:: link:{github_stl_path}/imix.py[stl/imix.py] [source,python] ---- @@ -1514,6 +1513,7 @@ This profile defines 3 streams, with packets of different sizes. The rate is dif ---- <1> Constructs a diffrent stream for each direction (replaces src and dest). <2> Even port id has direction==0 and odd has direction==1. +// direction==1 not shown explicitly in the code? <3> Field Engine program to change fields within the packets. // we can link "Field Engine" to an appropriate location for for more info. @@ -1539,7 +1539,7 @@ For more information, see link:trex_rpc_server_spec.html#_object_type_em_vm_em_a The following example demonstrates creating a SYN attack from many src addresses to one server. -*file*:: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] +*File*:: link:{github_stl_path}/syn_attack.py[stl/syn_attack.py] [source,python] ---- @@ -1606,7 +1606,7 @@ pkt,Client IPv4,Client Port The following example creates multiple flows from the same packet template. The Tuple Generator instructions are used to create two stream variables for IP and port. See link:trex_rpc_server_spec.html#_object_type_em_vm_em_a_id_vm_obj_a[here] // clarify link -*file*:: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] +*File*:: link:{github_stl_path}/udp_1pkt_tuple_gen.py[stl/udp_1pkt_tuple_gen.py] [source,python] ---- @@ -1631,7 +1631,7 @@ The following example creates multiple flows from the same packet template. The ---- <1> Defines a struct with two dependent variables: tuple.ip, tuple.port <2> Writes the tuple.ip variable to `IPv4.src` field offset. -<3> Writes the tuple.port variable to `UDP.sport` field offset. Set UDP.checksum to 0. +<3> Writes the tuple.port variable to `UDP.sport` field offset. Set `UDP.checksum` to 0. // Hanoch: add how to set UDP.checksum to 0 @@ -1662,7 +1662,7 @@ The following example writes a stream variable to a bit field packet variable. 
I 0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1| |==== -*file*:: link:{github_stl_path}/udp_1pkt_mpls_vm.py[stl/udp_1pkt_mpls_vm.py] +*File*:: link:{github_stl_path}/udp_1pkt_mpls_vm.py[stl/udp_1pkt_mpls_vm.py] [source,python] ---- @@ -1704,7 +1704,7 @@ The following example demonstrates varies the packet size randomly, as follows: 2. Trims the packet to the size you want. 3. Updates the packet fields according to the new size. -*file*:: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] +*File*:: link:{github_stl_path}/udp_rand_len_9k.py[stl/udp_rand_len_9k.py] [source,python] ---- @@ -1743,7 +1743,7 @@ The following example demonstrates varies the packet size randomly, as follows: ) ---- <1> Defines a random stream variable with the maximum size of the packet. -<2> Trims the packet size to the fv_rand value. +<2> Trims the packet size to the `fv_rand` value. <3> Fixes ip.len to reflect the packet size. <4> Fixes udp.len to reflect the packet size. @@ -1752,7 +1752,7 @@ The following example demonstrates varies the packet size randomly, as follows: The following example uses a header that is not supported by Scapy by default. The example demonstrates VXLAN support. -*file*:: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] +*File*:: link:{github_stl_path}/udp_1pkt_vxlan.py[stl/udp_1pkt_vxlan.py] [source,python] ---- @@ -1808,7 +1808,7 @@ The following example generates traffic from many clients with different IP/MAC image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] -1. Send gratuitous ARP from B->D with server IP/MAC (58.55.1.1). +1. Send a gratuitous ARP from B->D with server IP/MAC (58.55.1.1). 2. DUT learns the ARP of server IP/MAC (58.55.1.1). 3. Send traffic from A->C with many client IP/MAC addresses. 
@@ -1837,7 +1837,7 @@ The following sends a link:https://wiki.wireshark.org/Gratuitous_ARP[gratuitous Then traffic can be sent from client side: A->C -*file*:: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] +*File*:: link:{github_stl_path}/udp_1pkt_range_clients_split.py[stl/udp_1pkt_range_clients_split.py] [source,python] ---- @@ -1908,7 +1908,7 @@ Scenario: 2 transmitters, DP threads ---- <1> Stream variable. -<2> Write it to IPv4.src. +<2> Write it to `IPv4.src`. .Variable per thread @@ -2157,7 +2157,7 @@ In the above figure we would like to that stream S3 will start on all the thread Assumption: The pcap file has one packet. If the pcap file has more than one packet, this procedure loads only the first packet. -*file*:: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] +*File*:: link:{github_stl_path}/udp_1pkt_pcap.py[stl/udp_1pkt_pcap.py] [source,python] ---- @@ -2171,7 +2171,7 @@ Assumption: The pcap file has one packet. If the pcap file has more than one pac <1> Takes the packet from the pcap file, relative to current directory (pwd) in which you are running the script. -*file*:: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] +*File*:: link:{github_stl_path}/udp_1pkt_pcap_relative_path.py[udp_1pkt_pcap_relative_path.py] [source,python] @@ -2190,7 +2190,7 @@ Assumption: The pcap file has one packet. If the pcap file has more than one pac *Goal*:: Load a pcap file with a *number* of packets, creating a stream with a burst value of 1 for each packet. The inter-stream gap (ISG) for each stream is equal to the inter-packet gap (IPG). -*file*:: link:{github_stl_path}/pcap.py[pcap.py] +*File*:: link:{github_stl_path}/pcap.py[pcap.py] [source,python] ---- @@ -2208,13 +2208,15 @@ Assumption: The pcap file has one packet. 
If the pcap file has more than one pac image::images/stl_tut_pcap_file1.png[title="pcap file",align="left",width={p_width/2}, link="images/stl_tut_pcap_file1.png"] -This figure the streams for a pcap file with 3 packets. +This figure shows the streams for a pcap file with 3 packets. + * Each stream is configured to Burst mode, with 1 packet * Each stream triggers the next stream. * The last stream triggers the first with `action_loop=loop_count` if `loop_count` > 1. + The profile runs on one DP thread because it has a burst with 1 packet. (Split cannot work in this case). -To run this example: +To run this example, enter: [source,bash] ---- @@ -2331,7 +2333,7 @@ $./stl-sim -f stl/pcap.py --yaml The following example loads a pcap file to many streams, and attaches a Field Engine program to each stream. For example, the Field Engine can change the `IP.src` of all the streams to a random IP address. -*file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] +*File*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] [source,python] ---- @@ -2411,7 +2413,7 @@ pkt, IPv4 , flow The following example demonstrates creating an IPv6 packet within an IPv4 packet, and creating a range of IP addresses. -*file*:: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] +*File*:: link:{github_stl_path}/udp_1pkt_ipv6_in_ipv4.py[stl/udp_1pkt_ipv6_in_ipv4.py] [source,python] ---- @@ -2444,7 +2446,7 @@ The following example demonstrates creating an IPv6 packet within an IPv4 packet ==== Tutorial: Mask instruction -The STLVmWrMaskFlowVar is single-instruction-multiple-data Field Engine instruction. The pseudocode is as follows: +STLVmWrMaskFlowVar is single-instruction-multiple-data Field Engine instruction. 
The pseudocode is as follows: .Pseudocode [source,bash] @@ -2468,7 +2470,7 @@ The STLVmWrMaskFlowVar is single-instruction-multiple-data Field Engine instruct *Example 1*:: -This use of STLVmWrMaskFlowVar casts a stream variable with 2 bytes to be 1 byte. +Here, STLVmWrMaskFlowVar casts a stream variable with 2 bytes to be 1 byte. [source,python] ---- @@ -2488,7 +2490,7 @@ This use of STLVmWrMaskFlowVar casts a stream variable with 2 bytes to be 1 byte *Example 2*:: -This use of STLVmWrMaskFlowVar shifts a variable by 8, which effectively multiplies by 256. +Here, STLVmWrMaskFlowVar shifts a variable by 8, which effectively multiplies by 256. [source,python] ---- @@ -2518,7 +2520,7 @@ This use of STLVmWrMaskFlowVar shifts a variable by 8, which effectively multipl *Example 3*:: -This use of STLVmWrMaskFlowVar instruction to generate the values shown in the table below as offset values for `pkt_offset`. +Here, STLVmWrMaskFlowVar instruction to generate the values shown in the table below as offset values for `pkt_offset`. [source,python] ---- @@ -2588,7 +2590,7 @@ For example, for the profile below, 'pcap_with_vm.py': * Automatic values such as 'port_id' which are not tunables will be provided on kwargs. -*file*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] +*File*:: link:{github_stl_path}/pcap_with_vm.py[stl/pcap_with_vm.py] [source,python] ---- @@ -2798,12 +2800,13 @@ trex> * Maximum number of concurrent streams on which statistics may be collected: 128 Two examples follow, one using the console and the other using the Python API. +// immediately below is the console example; where's the Python API example? *Console*:: The following simple traffic profile defines 2 streams and configures them with 2 different PG IDs. 
-*file*:: link:{github_stl_path}/flow_stats.py[stl/flow_stats.py] +*File*:: link:{github_stl_path}/flow_stats.py[stl/flow_stats.py] [source,python] ---- @@ -2940,7 +2943,9 @@ The following shows a flow_stats object for 3 PG IDs after a specific run: ---- -==== Tutorial: Per stream latency/Jitter [TODO] +==== Tutorial: Per stream latency/Jitter + +// [TODO] *(Future Feature - not yet implemented)* @@ -2955,7 +2960,7 @@ It is possible to define a traffic profile using HTTAPI arguments. The API creates native Scapy/Field Engine instructions. For limitations see xref:altapi-support[here]. -*file*:: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] +*File*:: link:{github_stl_path}/hlt/hlt_udp_inc_dec_len_9k.py[stl/hlt/hlt_udp_inc_dec_len_9k.py] [source,python] ---- @@ -3021,7 +3026,7 @@ Alternatively, use the following command to convert to a native Python profile. $ ./stl-sim -f stl/hlt/hlt_udp_inc_dec_len_9k.py --native ---- -.Auto generated code +.Auto-generated code [source,python] ---- # !!! Auto-generated code !!! @@ -3119,9 +3124,8 @@ The console uses the TRex client API to control TRex. | WORK (pause) -> PAUSE (resume )--- | | | | - -------------------------------------- - ------ + -------------------------------------- +---- ==== Common Arguments @@ -3189,6 +3193,8 @@ $command [-m 100] [-m 10gb] [-m 10kpps] [-m 40%] * Syncs with the port info and stream info state * Reads all counter stats for reference +// IGNORE: this line helps rendering of next line + *Example*:: [source,bash] @@ -3211,6 +3217,8 @@ Resets the server and client to a known state. Not used in normal scenarios. 
- Stops all traffic on all ports - Removes all streams from all ports +// IGNORE: this line helps rendering of next line + *Example*:: [source,bash] @@ -3259,14 +3267,14 @@ $stats (port mask) [-g] [-p] [-ps] -g show only global stats -p only ports stats -ps only port status (type/driver/link-up/down/negotion type etc) - ---- +// IGNORE - this line helps rendering ===== streams Shows the configured streams on each port, from the client cache. -// clarify "should" + *Example*:: @@ -3338,6 +3346,8 @@ $streams --port 0 --streams 0 -f * Acts only on ports in "stopped: mode. Using `--force` first stops the port(s). * Note: If any ports are not in "stopped" mode, the command fails. +// IGNORE: this line helps rendering of next line + *Example*:: [source,bash] @@ -3379,6 +3389,8 @@ $start -port 1 2 -f stl/imix.py -m 100 * Changes the mode of the port to "stopped" * Does not remove streams +// IGNORE: this line helps rendering of next line + *Example*:: Use this command to stop the specified ports. @@ -3414,6 +3426,8 @@ $pause (port mask) * Changes a working set of port to a "resume" state * All ports should be in "paused" status. If any of the ports are not paused, the command fails. +// IGNORE: this line helps rendering of next line + *Example*:: See the port mask description. @@ -3429,7 +3443,9 @@ $resume (port mask) Update the bandwidth multiplier for a mask of ports. -* All ports must be in "work" state. If any ports are not in "work" state, the command fails. +* All ports must be in "work" state. If any ports are not in "work" state, the command fails + +// IGNORE: this line helps rendering of next line *Example*:: @@ -3443,9 +3459,10 @@ See the descriptions for port mask and multiplier. [NOTE] ===================================== - Here we could add the ability to disable/enable specific stream, load new stream dynamically etc. + Here we could add the ability to disable/enable specific stream, load a new stream dynamically, and so on. 
===================================== +// clarify note above ===== TUI -- cgit 1.2.3-korg From c2fdcbf6606c47d0be9709d91899a97d32f46dcf Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 3 Apr 2016 02:52:47 +0300 Subject: RX filters doc --- trex_rpc_server_spec.asciidoc | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index c48df052..0fa53387 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1059,6 +1059,46 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj } +---- + + +=== Remove RX Filters +* *Name* - 'remove_rx_filters' +* *Valid States* - 'owned' +* *Description* - Post to calling stop, the client should call this function to remove + any RX filters that were attached. + this is because the server cannot know when it is safe to remove those + (after stop some packets might take time to get to arrive - RTT) +* *Paramters* +** *handler* ['string'] - unique connection handler +** *port_id* ['int'] - port id on which to remove all RX filters + +* *Result* ['object'] - {} + +[source,bash] +---- + +'Request': + +{ + "id": "1jwrw9nx", + "jsonrpc": "2.0", + "method": "remove_rx_filters", + "params": { + "handler": "ywVlqZa8", + "port_id": 3 + } +} + +'Request': + +{ + "id": "1jwrw9nx", + "jsonrpc": "2.0", + "result": {} +} + + ---- === Get Global Stats -- cgit 1.2.3-korg From 0332373c99489590569a8da8e4f9cd06b11fc2a9 Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 3 Apr 2016 08:39:04 +0300 Subject: API sync (major change) --- trex_rpc_server_spec.asciidoc | 122 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 115 insertions(+), 7 deletions(-) diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 0fa53387..7df63553 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -39,7 +39,9 @@ include::trex_ga.asciidoc[] | 1.5 | Hanoch Haim (hhaim) | 
- add more instructions (v1.92) - +| 1.6 | Itay Marom (imarom) +| +- added API synchronization |================= @@ -158,8 +160,77 @@ image::images/rpc_states.png[title="Port States",align="left",width=150, link="i == RPC Commands The following RPC commands are supported +=== API Synchronization +* *Name* - 'api_sync' +* *API Class* - 'None' +* *Valid States* - 'not relevant' +* *Description* - Sync with server about API classes. This allows the server and the client + to be sure they are fully synced. + The return values are used for furthur communication with the server. + every API from a specific class requires its corresponding api_h parameter + added to the specific parameters of the function. +* *Paramters* - +** *api_vers* [list] - A list of objects of type xref:api_class['api_class'] +* *Result* ['object'] - A list of objects of type xref:api_class_rc['api_class_rc'] + +.Object type 'api_class' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | name of the API class +| major | int | major version +| minor | int | minor version +|================= + +.Object type 'api_class_rc' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | name of the API class +| api_h | string | API handler for this API class +|================= + +Example: + +[source,bash] +---- +'Request': + +{ + "id": "6d4e9gs3", + "jsonrpc": "2.0", + "method": "api_sync", + "params": { + "api_vers": [ + { + "type": "core" + "major": 1, + "minor": 0, + } + ] + } +} + +'Response': + +{ + "id": "6d4e9gs3", + "jsonrpc": "2.0", + "result": { + "api_vers": [ + { + "type": "core" + "api_h": "SPhoCDIV", + } + ] + } +} + +---- + === Ping * *Name* - 'ping' +* *API Class* - 'None' * *Valid States* - 'not relevant' * *Description* - Pings the TRex server * *Paramters* - None @@ -190,6 +261,7 @@ Example: === Get Server Supported Commands * *Name* - 'get_supported_cmds' +* *API Class* - 'core' * *Valid 
States* - 'not relevant' * *Description* - Queries the server for all the supported commands * *Paramters* - None @@ -205,7 +277,9 @@ Example: "jsonrpc": "2.0", "id": 1, "method": "get_supported_cmds", - "params": null + "params": { + "api_h": "SPhoCDIV" + } } @@ -231,6 +305,7 @@ Example: === Get Version * *Name* - 'get_version' +* *API Class* - 'core' * *Valid States* - 'not relevant' * *Description* - Queries the server for version information * *Paramters* - None @@ -255,7 +330,9 @@ Example: "id": "wapkk8m6", "jsonrpc": "2.0", "method": "get_version", - "params": null + "params": { + "api_h": "SPhoCDIV" + } } @@ -276,6 +353,7 @@ Example: === Get System Info * *Name* - 'get_system_info' +* *API Class* - 'core' * *Description* - Queries the server for system properties * *Paramters* - None * *Result* ['object'] - See table below @@ -311,7 +389,9 @@ Example: "id": "zweuldlh", "jsonrpc": "2.0", "method": "get_system_info", - "params": null + "params": { + "api_h": "SPhoCDIV" + } } 'Response': @@ -353,6 +433,7 @@ Example: === Get Port Status * *Name* - 'get_port_status' +* *API Class* - 'core' * *Valid States* - 'all' * *Description* - Queries the server for status * *Paramters* - @@ -369,6 +450,7 @@ Example: "jsonrpc": "2.0", "method": "get_port_status", "params": { + "api_h": "SPhoCDIV", "port_id": 2 } } @@ -398,6 +480,7 @@ Example: === Acquire * *Name* - 'Acquire' +* *API Class* - 'core' * *Valid States* - 'all' * *Description* - Takes ownership over the port * *Paramters* - @@ -416,8 +499,9 @@ Example: "jsonrpc": "2.0", "method": "Acquire", "params": { - "user": "itay" - "port_id": 1 + "api_h": "SPhoCDIV", + "user": "itay", + "port_id": 1, "force": false, } } @@ -436,6 +520,7 @@ Example: === Release * *Name* - 'release' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - Release owernship over the device * *Paramters* - @@ -453,7 +538,8 @@ Example: "jsonrpc": "2.0", "method": "release", "params": { - "handler": "37JncCHr" + "api_h": "SPhoCDIV", + 
"handler": "37JncCHr", "port_id": 1 } } @@ -471,6 +557,7 @@ Example: === Add Stream * *Name* - 'add_stream' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - Adds a stream to a port * *Paramters* @@ -770,6 +857,7 @@ This could be stream_id different from the stream object which contains the rx_s "jsonrpc": "2.0", "method": "add_stream", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 1, "stream_id": 502 @@ -819,6 +907,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Remove Stream * *Name* - 'remove_stream' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - Removes a stream from a port * *Paramters* @@ -838,6 +927,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "remove_stream", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 1, "stream_id": 502 @@ -857,6 +947,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Stream ID List * *Name* - 'get_stream_list' +* *API Class* - 'core' * *Valid States* - 'unowned', 'owned', 'active' * *Description* - fetch all the assoicated streams for a port * *Paramters* @@ -875,6 +966,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "get_stream_list", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 1 } @@ -896,6 +988,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Stream * *Name* - 'get_stream' +* *API Class* - 'core' * *Valid States* - 'unowned', 'owned', 'active' * *Description* - get a specific stream object * *Paramters* @@ -915,6 +1008,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "get_stream", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 1, "stream_id": 7 @@ -954,6 +1048,7 @@ In case 
rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Remove All Streams * *Name* - 'remove_all_streams' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - remove all streams from a port * *Paramters* @@ -973,6 +1068,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "remove_all_streams", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 2 } @@ -992,6 +1088,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Start Traffic * *Name* - 'start_traffic' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - Starts the traffic on a specific port. if traffic has already started an error will be returned * *Paramters* @@ -1010,6 +1107,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "start_traffic", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 3 } @@ -1027,6 +1125,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Stop Traffic * *Name* - 'stop_traffic' +* *API Class* - 'core' * *Valid States* - 'active' * *Description* - Stops the traffic on a specific port. if the port has already started nothing will happen * *Paramters* @@ -1045,6 +1144,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "stop_traffic", "params": { + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 3 } @@ -1064,6 +1164,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Remove RX Filters * *Name* - 'remove_rx_filters' +* *API Class* - 'core' * *Valid States* - 'owned' * *Description* - Post to calling stop, the client should call this function to remove any RX filters that were attached. 
@@ -1085,6 +1186,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "remove_rx_filters", "params": { + "api_h": "SPhoCDIV", "handler": "ywVlqZa8", "port_id": 3 } @@ -1103,6 +1205,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Global Stats * *Name* - 'get_global_stats' +* *API Class* - 'core' * *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get machine global stats * *Paramters* - None @@ -1128,6 +1231,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Port Stats * *Name* - 'get_port_stats' +* *API Class* - 'core' * *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get port stats * *Paramters* @@ -1154,6 +1258,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj === Get Stream Stats * *Name* - 'get_steram_stats' +* *API Class* - 'core' * *Valid States* - 'unowned', 'owned', 'active' * *Description* - Get port stats * *Paramters* @@ -1252,6 +1357,7 @@ On the following example, there's no VM instructions, rx_stats option is disable "jsonrpc" : "2.0", "method" : "add_stream", "params" : { + "api_h": "SPhoCDIV", "handler" : "37JncCHr", "port_id" : 1, "stream" : { @@ -1332,6 +1438,7 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s "jsonrpc" : "2.0", "method" : "add_stream", "params" : { + "api_h": "SPhoCDIV", "handler" : "2JjzhMai", "port_id" : 3, "stream" : { @@ -1374,6 +1481,7 @@ Ontop, this stream is the last stream of the sequence, so `next_stream_id` of `s "jsonrpc" : "2.0", "method" : "add_stream", "params" : { + "api_h": "SPhoCDIV", "handler" : "2JjzhMai", "port_id" : 3, "stream" : { -- cgit 1.2.3-korg From 1691c6e374b0dcd4e159632c863054710a2da323 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 4 Apr 2016 10:28:44 +0300 Subject: v1.99 --- release_notes.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) 
diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 4eb0a3c6..09dbeced 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -24,6 +24,18 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 1.99 == + + +* The Client package includes Console/examples +* Client API verification mechanism. The client should match the server version range + +=== fix issues: === + +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-193[trex-193] +* Python2/Python3 client API hardening +* Per stream statistics in software hardening - add support for wait on rx packets + == Release 1.98 == -- cgit 1.2.3-korg From f1fc8ff35f9c632fcb4d8c5804d8c725dcd0a94a Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Tue, 5 Apr 2016 10:34:48 +0300 Subject: added appendix 'Configure Linux host as network emulator' --- trex_book.asciidoc | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 6b0db870..b58651ca 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1699,6 +1699,64 @@ enabled=0 * Reboot +=== Configure Linux host as network emulator + +There are lots of Linux tutorials on the web, so this will not be full tutorial, only highlighting some key points. Commands +were checked on Ubuntu system. + +==== Enable forwarding +One time (will be discarded after reboot): + +[source,bash] +---- +echo 1 > /proc/sys/net/ipv4/ip_forward +---- +To make this permanent, add the following line to the file /etc/sysctl.conf: + +---- +net.ipv4.ip_forward=1 +---- + +==== Add static routes +Example if for the default TRex networks, 48.0.0.0 and 16.0.0.0. 
+ +Routing all traffic from 48.0.0.0 to the gateway 10.0.0.100 +[source,bash] +---- +route add -net 48.0.0.0 netmask 255.255.0.0 gw 10.0.0.100 +---- + +Routing all traffic from 16.0.0.0 to the gateway 172.168.0.100 +[source,bash] +---- +route add -net 16.0.0.0 netmask 255.255.0.0 gw 172.168.0.100 +---- +If you use stateless mode, and decide to add route only in one direction, remember to disable reverse path check. + +For example, to disable on all interfaces: +[source,bash] +---- +for i in /proc/sys/net/ipv4/conf/*/rp_filter ; do + echo 0 > $i +done +---- + +Alternatively, you can edit /etc/network/interfaces, and add something like this for both ports connected to TRex. +This will take effect, only after restarting networking (rebooting the machine in an alternative also). +---- +auto eth1 +iface eth1 inet static +address 16.0.0.100 +netmask 255.0.0.0 +network 16.0.0.0 +broadcast 16.255.255.255 +... same for 48.0.0.0 +---- + +==== Add static ARP entries +[source,bash] +---- +sudo arp -s 10.0.0.100 +sudo arp -s 172.168.0.100 <1> <2> We are planning to add MACs to `./dpdk_setup_ports.py -s` ===================================== - -Q: TRex traffic doesn't show up on Wireshark, So I can't capture the traffic from the TRex port + -A: TRex uses DPDK that take ownership on the ports. We are using a Switch with port mirroring to capture the traffic +Q: TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port + +A: TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. You can use switch with port mirroring to capture the traffic. 
-- cgit 1.2.3-korg From f12a1fe22bafc78e335c385208f60ecd9cabdf68 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 10 Apr 2016 12:56:33 +0300 Subject: minor fix --- trex_book.asciidoc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index b58651ca..6de7eb5f 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -1704,8 +1704,28 @@ enabled=0 There are lots of Linux tutorials on the web, so this will not be full tutorial, only highlighting some key points. Commands were checked on Ubuntu system. +For this example: + +1. TRex Client side network is 16.0.0.x +2. TRex Server side network is 48.0.0.x +3. Linux Client side network eth0 is configured with IPv4 as 172.168.0.1 +4. Linux Server side network eth1 is configured with IPv4 as 10.0.0.1 + +[source,bash] +---- + + TRex-0 (16.0.0.1->48.0.0.1 ) <--> + + ( 172.168.0.1/255.255.0.0)-eth0 [linux] -( 10.0.0.1/255.255.0.0)-eth1 + + <--> TRex-1 (16.0.0.1<-48.0.0.1) + +---- + + ==== Enable forwarding One time (will be discarded after reboot): + + [source,bash] ---- echo 1 > /proc/sys/net/ipv4/ip_forward -- cgit 1.2.3-korg From e09be57ed58521655a42637c01e1caf1454cf6c6 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 10 Apr 2016 17:53:05 +0300 Subject: Update UCS recommendation --- trex_book.asciidoc | 49 +++++++++++++++++++++++++++++++++++++----------- trex_book_basic.asciidoc | 2 -- 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 6de7eb5f..d8b9591e 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -2,7 +2,7 @@ TRex ==== :author: hhaim :email: -:revnumber: 1.88 +:revnumber: 2.0 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex @@ -79,42 +79,59 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw ===================================== A high-end UCS platform is not required for operating TRex in its current version, but may be 
required for future versions. ===================================== + +[NOTE] +===================================== +Not all supported DPDK interfaces are supported by TRex +===================================== .Preferred UCS [options="header",cols="1,3"] |================= | UCS Type | Comments -| UCS C220 M3/M4 | Supports up to 40Gb/sec with 540-D2 and with newer Intel NIC 80Gb/sec with 1RU, recommended +| UCS C220 M3/M4 | *Prefered, Low-End*, Supports up to 40Gb/sec with 540-D2 and with newer Intel NIC 80Gb/sec with 1RU, recommended | UCS C200| Early UCS model | UCS C210 M2 | Supports up to 40Gb/sec PCIe3.0 -| UCS C240 M3 | Supports up to 200Gb/sec using Intel XL710 NICS +| UCS C240 M3/M4 | *Prefered, High-End* Supports up to 200Gb/sec. 6x XL710 NICS (PCIex8) or 2xFM10K (PCIex16) | UCS C260M2 | Supports up to 30Gb/sec due to V2 PCIe. |================= -.Internal Components +.Internal Components Low-End C220M4 +[options="header",cols="1,2",width="60%"] +|================= +| Components | Details +| CPU | 2x CPU E5-2620/2.0 GHz +| CPU Configuration | 2-Socket CPU configurations (can also work with one CPU) +| Memory | 2x4 banks for each CPU. Total of 8 BANKS ==> 32GB +| NO RAID | NO RAID +|================= + +.Internal Components High-End C240M4 [options="header",cols="1,2",width="60%"] |================= | Components | Details -| CPU | 2x CPU E5-2620 +| CPU | 2x CPU E5-2667 /3.20 GHz +| PCIe | 1x ,Riser PCI expantion card option A PID UCSC-PCI-1A-240M4 this will give the option to have two PCIex16 | CPU Configuration | 2-Socket CPU configurations (can also work with one CPU) | Memory | 2x4 banks for each CPU. 
Total of 8 BANKS ==> 32GB | NO RAID | NO RAID |================= -.Intel NICS supported +.Supported NICS [options="header",cols="1,1,2",width="50%"] |================= | Bandwidth | Chipset | Example | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC -| 10Gb/sec | Intel 82599 | Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter -| 40Gb/sec | Intel XL710 Intel X710 | QSFP+ (copper/optical), SFP+ +| 10Gb/sec | Intel 82599| Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter +| 10Gb/sec | Intel X710 | SFP+, *Preferred* support per stream stats in hardware +| 40Gb/sec | Intel XL710 | QSFP+ (copper/optical) +| 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicon link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[PE3100G2DQiR_96] *under dev* | VMXNET / + VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox |================= - .XL710 QSFP+ support [options="header",cols="1,1",width="70%"] |================= @@ -127,7 +144,17 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] |================= +[NOTE] +===================================== For Intel XL710 NICS, Cisco SR4/LR QSFP+ won't work +===================================== + +.FM10K QSFP28 support +[options="header",cols="1,1",width="70%"] +|================= +| QSFP28 | Example +| todo | todo +|================= [IMPORTANT] @@ -150,7 +177,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc ===================================== -.Sample order for UCSC-C220-M3S with 4x10Gb ports +.Sample order for low-end UCSC-C220-M3S with 4x10Gb ports 
[options="header",cols="2,1^",width="50%"] |================= | Component | Amount @@ -168,7 +195,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | UCSC-RAIL1 | 1 |======================== -NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex ( but not for plain Linux driver ). +NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex (but not for plain Linux driver). === Install OS diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index 105a5a1d..fd2db450 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -3191,8 +3191,6 @@ So if the m is set as 1, the total PPS is : 102*2+50*20 = 1204 PPS. The BPS depends on the packet size. You can refer to your packet size and get the BPS = PPS*Packet_size. ==== Client/Server IP allocation - -We have several features under development for IP allocation. - *1) per-template generator* -- cgit 1.2.3-korg From 254c1e2606daf11d42c04b9c3e2c354706af97c5 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 13 Apr 2016 16:03:37 +0300 Subject: mrp comments --- trex_book.asciidoc | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index d8b9591e..cc1c8fc1 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -447,7 +447,7 @@ zmq publisher at: tcp://*:4500 -Global stats enabled Cpu Utilization : 0.0 % <12> 29.7 Gb/core <13> - Platform_factor : 1.0 + Platform_factor : 1.0 Total-Tx : 867.89 Kbps <2> Total-Rx : 867.86 Kbps <3> Total-PPS : 1.64 Kpps @@ -457,8 +457,8 @@ zmq publisher at: tcp://*:4500 Expected-CPS : 1.00 cps <10> Expected-BPS : 1.36 Kbps <11> - Active-flows : 0 <6> Clients : 510 Socket-util : 0.0000 % - Open-flows : 1 <7> Servers : 254 Socket : 1 Socket/Clients : 0.0 + Active-flows : 0 <6> Clients : 510 Socket-util : 0.0000 % + Open-flows : 1 <7> Servers : 254 Socket : 1 Socket/Clients : 0.0 drop-rate : 0.00 bps <8> 
current time : 5.3 sec test duration : 94.7 sec @@ -488,6 +488,21 @@ zmq publisher at: tcp://*:4500 <13> Gb/sec generated per core of DP. Higer is better. <14> Rx and latency thread CPU utilization. + +More statistic information: + +*socket*:: same as the active flows. + +*Socket/Clients*:: is equal active_flows/#clients, average of active flow per client. + +*Socket-util*:: is equal to ~(100*active_flows/#clients)/64K equal to (average active flows per client*100/64K ) in words, it give an estimation of how many socket ports are used per client IP. Utilization of more than 50% means that TRex is generating too many flows per one client and you need to add more clients. + +*Max window*:: shows a momentary maximum latency for a time window of 500msec. There are a few numbers per number of windows that are shown. + The new number (the last 500msec) is the right number. The oldest in the left number. This can help to identify spikes of high latency that after a time clear.in a contrast the maximum latency will stuck at the maximum value for all the test. + +*Platform_factor*:: There are cases that we duplicate the traffic using splitter/Switch and we would like all the number to be multiplied by this factor (e.g. x2) + + WARNING: If you don't see rx packets, revisit your MAC address configuration. ==== Running TRex for the first time with ESXi: @@ -935,15 +950,17 @@ Cpu Utilization : 0.1 % Rx Check stats enabled <2> ------------------------------------------------------------------------------------------- - rx check: avg/max/jitter latency, 94 , 744, 49 | 252 287 309 <3> + rx check: avg/max/jitter latency, 94 , 744, 49 | 252 287 309 <3> - active flows: 10, fif: 308, drop: 0, errors: 0 <4> + active flows: <6> 10, fif: <5> 308, drop: 0, errors: 0 <4> ------------------------------------------------------------------------------------------- ---- <1> CPU% of the Rx thread. If it is too high *increase* the sample rate. <2> Rx Check section. 
For more detailed info, press 'r' during the test or at the end of the test. <3> Average latency, max latency, jitter on the template flows in microseconds. This is usually *higher* than the latency check packet because the feature works more on this packet. <4> Drop counters and errors counter should be zero. If not, press 'r' to see the full report or view the report at the end of the test. +<5> First in flow (fif)- number of new flows handled by rx thread +<6> active flows - number of active flows handled by rx thread .Full report by pressing 'r' [source,python] @@ -960,7 +977,7 @@ Cpu Utilization : 0.1 % cnt : 2 high_cnt : 2 max_d_time : 1041 usec - sliding_average : 1 usec + sliding_average : 1 usec <3> precent : 100.0 % histogram ----------- @@ -990,6 +1007,7 @@ Cpu Utilization : 0.1 % ---- <1> Any errors shown here <2> Error per template info +<3> low pass filter on the active average of latency events *Limitation:*:: -- cgit 1.2.3-korg From 467dedb7dff7260b98ec0f8884e7617d32d1f85f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 14 Apr 2016 14:01:24 +0300 Subject: add x710 NIC recommendation --- trex_book.asciidoc | 16 ++++++++++++++-- visio_drawings/stl_streams_example.vsd | Bin 183296 -> 353280 bytes 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index cc1c8fc1..640529c9 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -124,7 +124,7 @@ Not all supported DPDK interfaces are supported by TRex | Bandwidth | Chipset | Example | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC | 10Gb/sec | Intel 82599| Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter -| 10Gb/sec | Intel X710 | SFP+, *Preferred* support per stream stats in hardware +| 10Gb/sec | Intel X710 | SFP+, *Preferred* support per stream stats in hardware link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[PE310G4i71L] | 40Gb/sec | 
Intel XL710 | QSFP+ (copper/optical) | 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicon link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[PE3100G2DQiR_96] *under dev* | VMXNET / + @@ -132,6 +132,18 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox |================= +.X710 SFP+ support (*for Silicom PE310G4i71L with Open Optic*) +[options="header",cols="1,1",width="70%"] +|================= +| SFP+ | Example +| Cisco SFP-10G-SR | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] +| Cisco SFP-10G-LR | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] +| Cisco SFP-H10GB-CU1M | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] +| Cisco SFP-10G-AOC1M | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] +|================= + +WARNING: Intel X710 NIC for example FH X710DA4FHBLK will work *only* with Intel SFP+. 
In case you like an open optic buy Silicom PE310G4i71L NIC + .XL710 QSFP+ support [options="header",cols="1,1",width="70%"] |================= @@ -146,7 +158,7 @@ VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch [NOTE] ===================================== - For Intel XL710 NICS, Cisco SR4/LR QSFP+ won't work + For Intel XL710 NICS, Cisco SR4/LR QSFP+ won't work you can buy Silicom with Open Optic ===================================== .FM10K QSFP28 support diff --git a/visio_drawings/stl_streams_example.vsd b/visio_drawings/stl_streams_example.vsd index b947c3e8..7d198609 100644 Binary files a/visio_drawings/stl_streams_example.vsd and b/visio_drawings/stl_streams_example.vsd differ -- cgit 1.2.3-korg From 24ae381b647cb1cc494f28b0839cd26cbf79e7ee Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 14 Apr 2016 15:27:57 +0300 Subject: Silicon/Silicom typo --- trex_book.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 640529c9..9aaf9487 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -126,7 +126,7 @@ Not all supported DPDK interfaces are supported by TRex | 10Gb/sec | Intel 82599| Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter | 10Gb/sec | Intel X710 | SFP+, *Preferred* support per stream stats in hardware link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[PE310G4i71L] | 40Gb/sec | Intel XL710 | QSFP+ (copper/optical) -| 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicon link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[PE3100G2DQiR_96] *under dev* +| 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicom link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[PE3100G2DQiR_96] *under dev* | VMXNET / + VMXNET3 (read notes) | VMware paravirtualize | connect using 
vmWare vSwitch | E1000 | paravirtualize | vmWare/KVM/VirtualBox -- cgit 1.2.3-korg From 0397f5bf480ae77fcf3169c345a840dcc5d9107a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 14 Apr 2016 19:59:18 +0300 Subject: v2.00 --- images/Thumbs.db | Bin 561152 -> 590336 bytes release_notes.asciidoc | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/images/Thumbs.db b/images/Thumbs.db index 9fb5649d..a7cb4a90 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 09dbeced..23988280 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -24,6 +24,30 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 2.00 == + +* Console +** Support partial port acquire using new CLI switch `-a ACQUIRE` (first phase) +** Add tx/rx graphs +* Python API: add an API for reading events as warning/errors +* HLTAPI support for per stream stats +* support VALN mode for per stream stats for 82599 using `--vlan` switch at server invocation +* A peek into TRex stateless GUI version for evaluation still without many features like packet builder, advance packet builder, per stream stats link:https://www.dropbox.com/s/vs9gojtdc5ewv05/setupCiscoTrex1.96-SNAPSHOT.exe?dl=0[TRex Stateless GUI Download] +** Only pcap file packet builder is supported in this version + +image::images/trex_stl_gui.png[title="TRex Stateless GUI",align="left",width=600, link="images/trex_stl_gui.png"] + + +=== fix issues: === + +* X710/XL710 per stream hardware stats +** link:http://trex-tgn.cisco.com/youtrack/issue/trex-199[trex-199] +** Fix issue of RX bytes +** Fix issue with mbuf leak +* Packet Memory shortage fix link:http://trex-tgn.cisco.com/youtrack/issue/trex-197[trex-197] +* Python Examples - move all examples to be 16.0.0.x/48.0.0.x for some refactor + + == Release 1.99 == -- cgit 1.2.3-korg From 18a43ac4ceedf7374f92d7462d8cb81ab2be72e2 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: 
Sun, 17 Apr 2016 11:58:52 +0300 Subject: David comment --- trex_book.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 9aaf9487..7696ab65 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -400,14 +400,16 @@ and example ---- - port_limit : 2 version : 2 - interfaces : ["03:00.0","03:00.1"] + interfaces : ["03:00.0","03:00.1"] <2> port_info : # set eh mac addr - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] <1> - dest_mac : [0x2,0x0,0x0,0x2,0x0,0x00] # port 1 <1> src_mac : [0x1,0x0,0x0,0x1,0x0,0x00] ---- -<1> source mac is like destination mac (this should be set or taken from vmware). the mac was taken from hypervisor +<1> Source mac is like destination mac (this should be set or taken from vmware). the mac was taken from hypervisor +<2> Currently TRex has a limitation and support only one type of NIC at a time. You can't mix different type of NIC in one config file. 
see here for more info link:http://trex-tgn.cisco.com/youtrack/issue/trex-197[trex-201] + ==== Running TRex -- cgit 1.2.3-korg From 4821132c77fe3ca1c4dee2d410e981eefd299630 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 17 Apr 2016 13:33:42 +0300 Subject: add ARP example --- images/trex_stl_gui.png | Bin 0 -> 51441 bytes trex_stateless.asciidoc | 76 +++++++++++++++++++++++++++++++++ visio_drawings/stl_streams_example.vsd | Bin 353280 -> 376832 bytes 3 files changed, 76 insertions(+) create mode 100644 images/trex_stl_gui.png diff --git a/images/trex_stl_gui.png b/images/trex_stl_gui.png new file mode 100644 index 00000000..59764b96 Binary files /dev/null and b/images/trex_stl_gui.png differ diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 215e2283..303de5c5 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -1876,6 +1876,82 @@ class STLS1(object): <2> Writes the stream variable `mac_src` with an offset determined by the offset of `IP.src` plus the `offset_fixup` of 2. +==== Tutorial: Field Engine, many clients with ARP + +In the following example, there are two Switchs SW1 and SW2. +The TRex port 0 is connected to SW1 and TRex port 1 is connected to SW2. +There are 253 hosts connected to SW1 and SW2 with two networks ports. 
+ +.Client side the network of the hosts +[cols="3<,3<", options="header",width="50%"] +|================= +| Name | Description +| TRex port 0 MAC | 00:00:01:00:00:01 +| TRex port 0 IPv4 | 16.0.0.1 +| IPv4 host client side range | 16.0.0.2-16.0.0.254 +| MAC host client side range | 00:00:01:00:00:02-00:00:01:00:00:FE +|================= + + +.Server side the network of the hosts +[cols="3<,3<", options="header",width="50%"] +|================= +| Name | Description +| TRex port 1 MAC | 00:00:02:00:00:01 +| TRex port 1 IPv4 | 48.0.0.1 +| IPv4 host server side range | 48.0.0.2-48.0.0.254 +| MAC host server side range | 00:00:02:00:00:02-00:00:02:00:00:FE +|================= + +image::images/stl_arp.png[title="arp/nd",align="left",width={p_width}, link="images/stl_arp.png"] + +In the following example, there are two Switchs SW1 and SW2. +The TRex port 0 is connected to SW1 and TRex port 1 is connected to SW2 +In this example, because there are many hosts connected to the same network using SW1 and not as a next hope we would like to teach SW1 the MAC addresses of the hosts and not to send the traffic directly to the hosts MAC (as it in any case known) +For that we would send an ARP to all the hosts (16.0.0.2-16.0.0.254) from TRex port 0 and Gratius ARP from server side (48.0.0.1) TRex port 1 as the first stage of the test + +So the step would be like that: + +1. Send a gratuitous ARP from TRex port 1 with server IP/MAC (48.0.0.1) after this stage SW2 will know that 48.0.0.1 is located after this port of SW2. +2. Send ARP request for all hosts from port 0 with a range of 16.0.0.2-16.0.0.254 after this stage all switch ports will learn the PORT/MAC locations. Without this stage the first packets from TRex port 0 will be flooded to all Switch ports. +3. 
send traffic from TRex0->clients, port 1->servers + + +.ARP traffic profile +[source,python] +---- + + base_pkt = Ether(dst="ff:ff:ff:ff:ff:ff")/ + ARP(psrc="16.0.0.1",hwsrc="00:00:01:00:00:01", pdst="16.0.0.2") <1> + + vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src", min_value=2, max_value=254, size=2, op="inc"), <2> + STLVmWrFlowVar(fv_name="mac_src" ,pkt_offset="ARP.pdst",offset_fixup=2), + ] + ,split_by_field = "mac_src" # split + ) + + +---- +<1> ARP packet with TRex port 0 MAC and IP and pdst as variable. +<2> Write it to `ARP.pdst`. + + +.Gratuitous ARP traffic profile +[source,python] +---- + + base_pkt = Ether(src="00:00:02:00:00:01",dst="ff:ff:ff:ff:ff:ff")/ + ARP(psrc="48.0.0.1",hwsrc="00:00:02:00:00:01", + hwdst="00:00:02:00:00:01", pdst="48.0.0.1") <1> + +---- +<1> G ARP packet with TRex port 1 MAC and IP no need a VM. + +[NOTE] +===================================================================== +This principal can be done for IPv6 too. ARP could be replaced with Neighbor Solicitation IPv6 packet. +===================================================================== + ==== Tutorial: Field Engine, split to core The following example splits generated traffic into a number of threads. You can specify the field to use for determining how to split the traffic into threads. Without this feature, the traffic is duplicated and all the threads transmit the same traffic. (See the results tables in the examples below in this tutorial.) 
diff --git a/visio_drawings/stl_streams_example.vsd b/visio_drawings/stl_streams_example.vsd index 7d198609..9029b0a1 100644 Binary files a/visio_drawings/stl_streams_example.vsd and b/visio_drawings/stl_streams_example.vsd differ -- cgit 1.2.3-korg From 98af8f62936c774026174cd391402cf58b5f9214 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 17 Apr 2016 13:34:12 +0300 Subject: add arp png --- images/stl_arp.png | Bin 0 -> 39982 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/stl_arp.png diff --git a/images/stl_arp.png b/images/stl_arp.png new file mode 100644 index 00000000..4cba216a Binary files /dev/null and b/images/stl_arp.png differ -- cgit 1.2.3-korg From c3d81593be59491e478f22f08386dca49b092ab7 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Mon, 18 Apr 2016 10:15:40 +0300 Subject: English fixes to running for first time with router --- trex_book.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 7696ab65..2326f3ec 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -554,13 +554,13 @@ image:images/passthrough_adding.png[title="passthrough_adding"] ==== Running TRex for the first time with router -You can follow this presentation link:trex_config_guide.html[first time TRex configuration] +You can follow this presentation: link:trex_config_guide.html[first time TRex configuration] //TBD: Note that the link does not work correctly in PDF rendition or continue reading. -TRex set source-mac of all port to `00:00:00:01:00:00` and expected to get to this MAC address `00:00:00:01:00:00` without a config file. -so you just need to configure router with the right MAC address. +Without config file, TRex sets source MAC of all ports to `00:00:00:01:00:00` and expects to receive packets with this destination MAC address. +So, you just need to configure your router with static ARP entry pointing to the above MAC address. 
-NOTE: Virtual routers on ESXi (for example, Cisco CSR1000v) must have a distinct MAC address for each port. Specify the address in the configuration file. see more xref:trex_config[here]. Another example is where the TRex is connected to a switch. In that case each of TRex port should have a distinc MAC address. +NOTE: Virtual routers on ESXi (for example, Cisco CSR1000v) must have distinct MAC address for each port. You need to specify the addresses in the configuration file. see more xref:trex_config[here]. Another example is TRex connected to a switch. In this case, each one of the TRex ports should have distinct MAC address. include::trex_book_basic.asciidoc[] -- cgit 1.2.3-korg From 7d454b28446aa8b1536c348f26585e86ca2ea80c Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 18 Apr 2016 13:41:27 +0300 Subject: add test folder --- ws_main.py | 5 +++++ wscript | 3 +++ 2 files changed, 8 insertions(+) diff --git a/ws_main.py b/ws_main.py index 5d9fd8cc..849e68fa 100644 --- a/ws_main.py +++ b/ws_main.py @@ -375,6 +375,11 @@ def publish_ext(bld): os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) +def publish_test(bld): + # copy all the files to our web server + remote_dir = "%s:%s" % ( Env().get_local_web_server(), Env().get_remote_release_path ()+'../test/') + os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) + diff --git a/wscript b/wscript index 2359786f..f935668f 100755 --- a/wscript +++ b/wscript @@ -40,6 +40,9 @@ def release(bld): def publish(bld): ws_main.publish(bld) +def publish_test(bld): + ws_main.publish_test(bld) + def publish_ext(bld): ws_main.publish_ext(bld) -- cgit 1.2.3-korg From 6c9c498860be3e96bd85834bea0c6c02907cad4a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Apr 2016 16:16:06 +0300 Subject: add new TOC --- images/Thumbs.db | Bin 590336 -> 601088 bytes 
images/icons/selected_tab_bg.png | Bin 0 -> 124 bytes images/icons/toggle.png | Bin 0 -> 135 bytes trex_book.asciidoc | 1 + trex_rpc_server_spec-docinfo.html | 6 + trex_rpc_server_spec.asciidoc | 1 + trex_stateless.asciidoc | 2 + trex_toc.asciidoc | 221 +++++++++++++++++++++++++++++++++++++ trex_vm_manual-docinfo.html | 6 + trex_vm_manual.asciidoc | 19 +++- ws_main.py | 225 +++++++++++++++++++++++++++++++++++--- wscript | 3 + 12 files changed, 463 insertions(+), 21 deletions(-) create mode 100644 images/icons/selected_tab_bg.png create mode 100644 images/icons/toggle.png create mode 100644 trex_rpc_server_spec-docinfo.html create mode 100644 trex_toc.asciidoc create mode 100644 trex_vm_manual-docinfo.html diff --git a/images/Thumbs.db b/images/Thumbs.db index a7cb4a90..893347af 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/icons/selected_tab_bg.png b/images/icons/selected_tab_bg.png new file mode 100644 index 00000000..16bb5d61 Binary files /dev/null and b/images/icons/selected_tab_bg.png differ diff --git a/images/icons/toggle.png b/images/icons/toggle.png new file mode 100644 index 00000000..84380cd0 Binary files /dev/null and b/images/icons/toggle.png differ diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 2326f3ec..f13c63ae 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -10,6 +10,7 @@ TRex :toclevels: 4 include::trex_ga.asciidoc[] +include::trex_toc.asciidoc[] == Introduction diff --git a/trex_rpc_server_spec-docinfo.html b/trex_rpc_server_spec-docinfo.html new file mode 100644 index 00000000..6fb66a5e --- /dev/null +++ b/trex_rpc_server_spec-docinfo.html @@ -0,0 +1,6 @@ + + + + + + diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 7df63553..1663830b 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -10,6 +10,7 @@ The TRex RPC Server :toclevels: 4 include::trex_ga.asciidoc[] +include::trex_toc.asciidoc[] == Change log diff --git 
a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 303de5c5..4aad01e6 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -22,6 +22,8 @@ ifdef::backend-xhtml11[] endif::backend-xhtml11[] include::trex_ga.asciidoc[] +include::trex_toc.asciidoc[] + == Audience diff --git a/trex_toc.asciidoc b/trex_toc.asciidoc new file mode 100644 index 00000000..664370b9 --- /dev/null +++ b/trex_toc.asciidoc @@ -0,0 +1,221 @@ + +ifdef::backend-xhtml11[] +++++ +
+ +
+
+ Table of Contents +
+ + +
+ + + + + + + + + + + + + + + + + + + + + +++++ +endif::backend-xhtml11[] + diff --git a/trex_vm_manual-docinfo.html b/trex_vm_manual-docinfo.html new file mode 100644 index 00000000..6fb66a5e --- /dev/null +++ b/trex_vm_manual-docinfo.html @@ -0,0 +1,6 @@ + + + + + + diff --git a/trex_vm_manual.asciidoc b/trex_vm_manual.asciidoc index 7e22d54a..ec4c39c5 100755 --- a/trex_vm_manual.asciidoc +++ b/trex_vm_manual.asciidoc @@ -8,6 +8,8 @@ TRex Virtual Machine setup and basic usage include::trex_ga.asciidoc[] +include::trex_toc.asciidoc[] + == Introduction @@ -276,12 +278,14 @@ Once we have TRex up and running, we can enjoy the benefit of having live monito This can be easily done by following these steps: - 0. Download the latest version of TrexViewer application and install it using http://trex-tgn.cisco.com/trex/client_gui/setup.exe[this link]. - 1. Start the application and fill in the following: + +1. Download the latest version of TrexViewer application and install it using http://trex-tgn.cisco.com/trex/client_gui/setup.exe[this link]. + + +2. Start the application and fill in the following: + - Trex ip: `127.0.0.1:4500` - 2. Click the play button. +3. Click the play button. ifdef::backend-docbook[] image::images/trex_motinor_config.png[title="TRex viewer start screen",align="center",width=400,link="images/trex_motinor_config.png"] @@ -291,9 +295,7 @@ ifdef::backend-xhtml11[] image::images/trex_motinor_config.png[title="TRex viewer start screen",align="center",width=900,link="images/trex_motinor_config.png"] endif::backend-xhtml11[] - - - 3. **That's it!** + +**That's it!** + Now the live data from TRex will be displayed on the screen. ifdef::backend-docbook[] @@ -328,3 +330,8 @@ The VM runs TRex with single client and single server port. The traffic generate TRex identifies only the packets which were dedicately sent by one of those traffic ports and receives them in the other port. 
Hence, packets generated by client port will be received by the server port and vice versa. Ontop, network adapter #4 used to [underline]#listen# to all traffic generated by both of TRex's ports, therefore it is very useful in providing live data of the generated flows over the network. + + + + + diff --git a/ws_main.py b/ws_main.py index 849e68fa..73b35c51 100644 --- a/ws_main.py +++ b/ws_main.py @@ -12,11 +12,155 @@ APPNAME='wafdocs' import os, re, shutil import shlex import subprocess +import json + top = '.' out = 'build' +from HTMLParser import HTMLParser + +class CTocNode: + def __init__ (self): + self.name="root" + self.level=1; # 1,2,3,4 + self.parent=None + self.childs=[]; # link to CTocNode + + def get_link (self): + name=self.name + l=name.split('.'); + l=l[-1].lower() + s=''; + for c in l: + if c.isalpha() or c.isspace(): + s+=c + + return '#_'+'_'.join(s.lower().split()); + + + def add_new_child (self,name,level): + n=CTocNode(); + n.name=name; + n.level=level; + n.parent=self; + self.childs.append(n); + return n + + def to_json_childs (self): + l=[] + for obj in self.childs: + l.append(obj.to_json()); + return (l); + + def to_open (self): + if self.level <3: + return True + else: + return False + + + def to_json (self): + d={"text" : self.name, + "link" : self.get_link(), + "state" : { + "opened" : self.to_open() + } + } + if len(self.childs)>0 : + d["children"]= self.to_json_childs() + return d + + + +class TocHTMLParser(HTMLParser): + + def __init__ (self): + HTMLParser.__init__(self); + self.state=0; + self.root=CTocNode() + self.root.parent=self.root + self.level=2; + self.d={}; + self.last_level=1 + self.set_level(1,self.root) + + + def set_level (self,level,node): + assert(node!=None); + assert(isinstance(node,CTocNode)==True); + self.d[str(level)]=node + + # in case we change from high to low level remove the higher level + if level1): + return (True); + + def handle_starttag(self, tag, attrs): + if self.is_header (tag): + self.state=True; + 
self.level=int(tag[1]); + + def handle_endtag(self, tag): + if self.is_header (tag): + self.state=False; + + + def handle_data(self, data): + if self.state: + level=self.level + + cnode=self.get_level(level-1) + + n=cnode.add_new_child(data,level); + assert(n!=None); + self.set_level(level,n) + self.last_level=level + + def dump_as_json (self): + return json.dumps(self.root.to_json_childs(), sort_keys=False, indent=4) + + + + +def create_toc_json (input_file,output_file): + f = open (input_file) + l=f.readlines() + f.close(); + html_input = ''.join(l) + parser = TocHTMLParser() + parser.feed(html_input); + f = open (output_file,'w') + f.write(parser.dump_as_json()); + f.close(); + + + + re_xi = re.compile('''^(include|image)::([^.]*.(asciidoc|\\{PIC\\}))\[''', re.M) def ascii_doc_scan(self): p = self.inputs[0].parent @@ -96,6 +240,47 @@ def convert_to_pdf(task): os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) return (0) + + +def toc_fixup_file (input_file, + out_file, + json_file_name + ): + + file = open(input_file) + contents = file.read() + replaced_contents = contents.replace('input_replace_me.json', json_file_name) + file = open(out_file,'w') + file.write(replaced_contents) + file.close(); + + + +def convert_to_html_toc_book(task): + + input_file = task.inputs[0].abspath() + + json_out_file = os.path.splitext(task.outputs[0].abspath())[0]+'.json' + tmp = os.path.splitext(task.outputs[0].abspath())[0]+'.tmp' + json_out_file_short = os.path.splitext(task.outputs[0].name)[0]+'.json' + + cmd='{0} -a stylesheet={1} -a icons=true -a docinfo -d book -a max-width=55em -o {2} {3}'.format( + task.env['ASCIIDOC'], + task.inputs[1].abspath(), + tmp, + task.inputs[0].abspath()); + + os.system( cmd ) + + create_toc_json(tmp,json_out_file) + + toc_fixup_file(tmp,task.outputs[0].abspath(),json_out_file_short); + + os.system('rm {0}'.format(tmp)); + + + + def convert_to_pdf_book(task): input_file = task.outputs[0].abspath() 
out_dir = task.outputs[0].parent.get_bld().abspath() @@ -209,6 +394,14 @@ def build_cp(bld,dir,root,callback): + + + + + + + + def build(bld): bld(rule=my_copy, target='symbols.lang') @@ -252,13 +445,6 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='release_notes.asciidoc waf.css', target='release_notes.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='trex_book.asciidoc waf.css', target='trex_manual.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', - source='trex_stateless.asciidoc waf.css', target='trex_stateless.html', scan=ascii_doc_scan) - bld(rule='${ASCIIDOC} -a docinfo -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -d book -o ${TGT} ${SRC[0].abspath()}', source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.html', scan=ascii_doc_scan) @@ -277,17 +463,26 @@ def build(bld): bld(rule=convert_to_pdf_book, source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.pdf', scan=ascii_doc_scan) - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html', scan=ascii_doc_scan) + # with nice TOC + bld(rule=convert_to_html_toc_book, + source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.html',scan=ascii_doc_scan) + + bld(rule=convert_to_html_toc_book, + source='trex_stateless.asciidoc waf.css', target='trex_stateless.html',scan=ascii_doc_scan); + + bld(rule=convert_to_html_toc_book, + source='trex_book.asciidoc waf.css', target='trex_manual.html',scan=ascii_doc_scan); 
+ + bld(rule=convert_to_html_toc_book, + source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html',scan=ascii_doc_scan); + + bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='packet_builder_yaml.asciidoc waf.css', target='packet_builder_yaml.html', scan=ascii_doc_scan) - - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html', scan=ascii_doc_scan) bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) @@ -367,18 +562,18 @@ def release(bld): def publish(bld): # copy all the files to our web server remote_dir = "%s:%s" % ( Env().get_local_web_server(), Env().get_remote_release_path ()+'../doc/') - os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) + os.system('rsync -av --del --rsh=ssh build/ %s' % (remote_dir)) def publish_ext(bld): from_ = 'build/' - os.system('rsync -avz -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) + os.system('rsync -avz --del -e "ssh -i %s" --rsync-path=/usr/bin/rsync %s %s@%s:%s/doc/' % (Env().get_trex_ex_web_key(),from_, Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path() ) ) def publish_test(bld): # copy all the files to our web server remote_dir = "%s:%s" % ( Env().get_local_web_server(), 
Env().get_remote_release_path ()+'../test/') - os.system('rsync -av --rsh=ssh build/ %s' % (remote_dir)) + os.system('rsync -av --del --rsh=ssh build/ %s' % (remote_dir)) diff --git a/wscript b/wscript index f935668f..187a5528 100755 --- a/wscript +++ b/wscript @@ -24,6 +24,9 @@ def configure(conf): ws_main.configure(conf) +def create_toc (bld): + ws_main.create_toc(bld) + def build(bld): ws_main.build(bld) -- cgit 1.2.3-korg From a4c7bae950640e258ffe868091e00491e148ef9b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 20 Apr 2016 16:51:15 +0300 Subject: fix new TOC link --- ws_main.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/ws_main.py b/ws_main.py index 73b35c51..276f21e8 100644 --- a/ws_main.py +++ b/ws_main.py @@ -25,24 +25,30 @@ class CTocNode: def __init__ (self): self.name="root" self.level=1; # 1,2,3,4 + self.link=None; self.parent=None self.childs=[]; # link to CTocNode def get_link (self): - name=self.name - l=name.split('.'); - l=l[-1].lower() - s=''; - for c in l: - if c.isalpha() or c.isspace(): - s+=c + if self.link==None: + name=self.name + l=name.split('.'); + l=l[-1].lower() + s=''; + for c in l: + if c.isalpha() or c.isspace(): + s+=c + + return '#_'+'_'.join(s.lower().split()); + else: + return '#'+self.link - return '#_'+'_'.join(s.lower().split()); - def add_new_child (self,name,level): + def add_new_child (self,name,level,link): n=CTocNode(); n.name=name; + n.link=link n.level=level; n.parent=self; self.childs.append(n); @@ -82,6 +88,7 @@ class TocHTMLParser(HTMLParser): self.root=CTocNode() self.root.parent=self.root self.level=2; + self.attrs=None self.d={}; self.last_level=1 self.set_level(1,self.root) @@ -122,6 +129,7 @@ class TocHTMLParser(HTMLParser): def handle_starttag(self, tag, attrs): if self.is_header (tag): + self.attrs=attrs self.state=True; self.level=int(tag[1]); @@ -129,14 +137,23 @@ class TocHTMLParser(HTMLParser): if self.is_header (tag): self.state=False; 
+ def get_id (self): + if self.attrs: + for obj in self.attrs: + if obj[0]=='id': + return obj[1] + else: + return None + def handle_data(self, data): if self.state: + level=self.level cnode=self.get_level(level-1) - n=cnode.add_new_child(data,level); + n=cnode.add_new_child(data,level,self.get_id()); assert(n!=None); self.set_level(level,n) self.last_level=level -- cgit 1.2.3-korg From 498bba51873f0e7c46dca98a7bf2e3fda8ea253f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 1 May 2016 18:32:44 +0300 Subject: toc minor fixes --- images/Thumbs.db | Bin 601088 -> 604160 bytes images/splitbar.png | Bin 0 -> 283 bytes images/trex_logo_toc.png | Bin 0 -> 2323 bytes trex_stateless.asciidoc | 3 +- trex_toc.asciidoc | 167 +++++++++++++++++++++++++++++++++++------------ ws_main.py | 20 +++--- 6 files changed, 137 insertions(+), 53 deletions(-) create mode 100644 images/splitbar.png create mode 100644 images/trex_logo_toc.png diff --git a/images/Thumbs.db b/images/Thumbs.db index 893347af..cc9c8e88 100755 Binary files a/images/Thumbs.db and b/images/Thumbs.db differ diff --git a/images/splitbar.png b/images/splitbar.png new file mode 100644 index 00000000..9c9c1988 Binary files /dev/null and b/images/splitbar.png differ diff --git a/images/trex_logo_toc.png b/images/trex_logo_toc.png new file mode 100644 index 00000000..eb9fa1ec Binary files /dev/null and b/images/trex_logo_toc.png differ diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 4aad01e6..ed9f6647 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -3222,9 +3222,10 @@ $command [-a] [-port 1 2 3] [-port 0xff] [-port clients/servers] port mask : [-a] : all ports [-port 1 2 3] : port 1,2 3 - [-port 0xff] : port by mask 0x1 for port 0 0x3 for port 0 and 1 + [-port 0xff] : port by mask 0x1 for port 0 0x3 for port 0 and 1 <1> [-port clients/servers] : -port clients will choose all the client side ports ---- +<7> ==== Duration diff --git a/trex_toc.asciidoc b/trex_toc.asciidoc index 
664370b9..e620f9a3 100644 --- a/trex_toc.asciidoc +++ b/trex_toc.asciidoc @@ -1,17 +1,22 @@ ifdef::backend-xhtml11[] ++++ -
-
+ Table of Contents
+ +
+ +
- @@ -53,14 +58,14 @@ ifdef::backend-xhtml11[] #toc { position: fixed; - top: 0; + top: 51px; left: 0; bottom: 0; width: 18em; padding-bottom: 1.5em; margin: 0; - overflow-x: auto; - overflow-y: hidden; + overflow-x: auto !important; + overflow-y: auto !important; border-right: solid 2px #cfcfcf; background-color: #FAFAFA; white-space: nowrap; @@ -75,6 +80,13 @@ ifdef::backend-xhtml11[] background-color: #e4e2e2; padding: 8px 0px 7px 45px; white-space: nowrap; + left: 0px; + display: block; + position: fixed; + z-index: 100; + width: 245px; + top: 0px; + overflow: hidden; } #toc .toclevel1 { @@ -106,29 +118,39 @@ ifdef::backend-xhtml11[] text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); } + /* For side bar */ + .ui-resizable-e{ + height: 100%; + width: 4px !important; + position: fixed !important; + top: 0px !important; + cursor: e-resize !important; + background: url('images\splitbar.png') repeat scroll right center transparent !important; + background-image: url('images\splitbar.png'); + } + + .jstree-default .jstree-themeicon{ + display: none !important; + } + + .jstree-anchor { font-size: 12px !important; color: #91A501 !important; } - /* For side bar */ - .ui-resizable-e{ - width: 10px !important; - } .jstree-clicked{ color: green !important; } - .jstree-default .jstree-themeicon{ - display: none !important; - } #toggle { position: fixed; top: 14px; left: 10px; - z-index: 10; + z-index: 210; + width: 24px; } #toggle img { @@ -139,6 +161,11 @@ ifdef::backend-xhtml11[] opacity:0.9; } + .trex_logo{ + top: 6px; + position: relative; + } + @@ -153,9 +180,15 @@ ifdef::backend-xhtml11[] initResizable(); // Toggle TOC whe clicking on the menu icon toggleTOC(); + // Handle Mobile - close TOC + checkMobile(); function initializeNavTree() { - $('#nav-tree').jstree({ + + // TOC tree options + var toc_tree = $('#nav-tree'); + + var toc_tree_options = { 'core' : { "animation" :false, "themes" : { "stripes" : false }, @@ -166,7 +199,9 @@ ifdef::backend-xhtml11[] } , "plugins" : [ 
"wholerow" ] - }); + }; + + $('#nav-tree').jstree(toc_tree_options) $('#nav-tree').on("changed.jstree", function (e, data) { window.location.href = data.instance.get_selected(true)[0].original.link; @@ -174,46 +209,92 @@ ifdef::backend-xhtml11[] } function initResizable() { - var toc = $("#toc"); - var bodyLeftMargin = $("body"); - $("#toc").resizable({ - resize: function(e, ui) { - var tocWidth = $("#toc").outerWidth(); - bodyLeftMargin.css({"marginLeft":parseInt(tocWidth)+20+"px"}); - }, - handles: 'e' - }); - } + var toc = $("#toc"); + var body = $("body"); + + // On resize + $("#toc").resizable({ + resize: function(e, ui) { + var tocWidth = $(toc).outerWidth(); + body.css({"marginLeft":parseInt(tocWidth)+20+"px"}); + $(".ui-resizable-e").css({"right":$(window).width()-parseInt(tocWidth)+"px"}); + $("#toctitle").css({"width":parseInt(tocWidth)-45+"px"}); + }, + handles: 'e' + }); + + // Do it for the first time + var tocWidth = $(toc).outerWidth(); + $(".ui-resizable-e").css({"right":$(window).width()-parseInt(tocWidth)+"px"}); + $("#toctitle").css({"width":parseInt(tocWidth)-45+"px"}); + } function toggleTOC(){ var isOpen = true; $( "#toggle" ).click(function() { if ( isOpen ) { // Close it - $("#toc").hide("slide", 500); - // Show the show/hide button - $("#toggle").css("right", "-40px"); - // $("body").css("margin-left", "20px"); - $("body").animate({ - "margin-left": "50px" - }, 500); + closTOC(); } else { // Open it - $("#toc").show("slide", 500); - // Show the show/hide button - $("#toggle").css("right", "15px"); - // $("body").css("margin-left", $(toc).outerWidth()+20+"px"); - $("body").animate({ - "margin-left": $(toc).outerWidth()+20+"px" - }, 500); + openTOC(); } + // Toggle status isOpen = !isOpen; }); } - } - ) + + // Close TOC by default if it is mobile + function checkMobile(){ + if(isMobileDevice()){ + closTOC(); + } + } + + // Check it it it is running on mobile device + function isMobileDevice() { + if( + navigator.userAgent.match(/Android/i) || 
+ navigator.userAgent.match(/BlackBerry/i) || + navigator.userAgent.match(/iPhone|iPad|iPod/i) || + navigator.userAgent.match(/Opera Mini/i) || + navigator.userAgent.match(/IEMobile/i) || + navigator.userAgent.match(/iPhone|iPad|iPod/i) + ) + { + return true; + } + else + { + return false; + } + } + + // Close TOC + function closTOC(){ + $("#toc").hide("slide", 500); + $("#toctitle").hide("slide", 500); + $(".ui-resizable-e").hide("slide", 500); + // Show the show/hide button + $("#toggle").css("right", "-40px"); + // Fil width + $("body").animate({"margin-left": "50px"}, 500); + } + + // Open TOC + function openTOC(){ + $("#toc").show("slide", 500); + $("#toctitle").show("slide", 500); + $(".ui-resizable-e").show("slide", 500); + // Show the show/hide button + $("#toggle").css("right", "15px"); + // Minimize page width + $("body").animate({"margin-left": $(toc).outerWidth()+20+"px"}, 500); + } + + }); ++++ diff --git a/ws_main.py b/ws_main.py index 276f21e8..11cca9f2 100644 --- a/ws_main.py +++ b/ws_main.py @@ -254,8 +254,7 @@ def configure(conf): def convert_to_pdf(task): input_file = task.outputs[0].abspath() out_dir = task.outputs[0].parent.get_bld().abspath() - os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) - return (0) + return os.system('a2x --no-xmllint -v -f pdf -d article %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) @@ -280,20 +279,22 @@ def convert_to_html_toc_book(task): json_out_file = os.path.splitext(task.outputs[0].abspath())[0]+'.json' tmp = os.path.splitext(task.outputs[0].abspath())[0]+'.tmp' json_out_file_short = os.path.splitext(task.outputs[0].name)[0]+'.json' - - cmd='{0} -a stylesheet={1} -a icons=true -a docinfo -d book -a max-width=55em -o {2} {3}'.format( + + cmd='{0} -a stylesheet={1} -a icons=true -a docinfo -d book -a max-width=55em -o {2} {3}'.format( task.env['ASCIIDOC'], task.inputs[1].abspath(), tmp, task.inputs[0].abspath()); - os.system( cmd ) + res= os.system( cmd ) + if 
res !=0 : + return (1) create_toc_json(tmp,json_out_file) toc_fixup_file(tmp,task.outputs[0].abspath(),json_out_file_short); - os.system('rm {0}'.format(tmp)); + return os.system('rm {0}'.format(tmp)); @@ -301,8 +302,7 @@ def convert_to_html_toc_book(task): def convert_to_pdf_book(task): input_file = task.outputs[0].abspath() out_dir = task.outputs[0].parent.get_bld().abspath() - os.system('a2x --no-xmllint -v -f pdf -d book %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) - return (0) + return os.system('a2x --no-xmllint -v -f pdf -d book %s -D %s ' %(task.inputs[0].abspath(),out_dir ) ) def ensure_dir(f): @@ -494,7 +494,6 @@ def build(bld): source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html',scan=ascii_doc_scan); - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) @@ -594,6 +593,9 @@ def publish_test(bld): +def publish_both(bld): + publish(bld) + publish_ext(bld) -- cgit 1.2.3-korg From 9822c0b5421c81d50c30484ec643069b86ac0b1d Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 2 May 2016 14:12:20 +0300 Subject: another small fix --- trex_toc.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/trex_toc.asciidoc b/trex_toc.asciidoc index e620f9a3..2eb2d2b0 100644 --- a/trex_toc.asciidoc +++ b/trex_toc.asciidoc @@ -112,7 +112,7 @@ ifdef::backend-xhtml11[] } .jstree-wholerow.jstree-wholerow-clicked { - background-image: url('images\icons\selected_tab_bg.png'); + background-image: url('images/icons/selected_tab_bg.png'); background-repeat: repeat-x; color: #fff !important; text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); @@ -125,8 +125,7 @@ ifdef::backend-xhtml11[] position: fixed !important; top: 0px !important; cursor: e-resize !important; - background: url('images\splitbar.png') repeat scroll right center transparent !important; - background-image: 
url('images\splitbar.png'); + background: url('images/splitbar.png') repeat scroll right center transparent !important; } .jstree-default .jstree-themeicon{ -- cgit 1.2.3-korg From 43df47df4ce28aef57704fa78cf29c43df9220e2 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 2 May 2016 15:05:35 +0300 Subject: another minor toc issue --- trex_toc.asciidoc | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/trex_toc.asciidoc b/trex_toc.asciidoc index 2eb2d2b0..46c51c2c 100644 --- a/trex_toc.asciidoc +++ b/trex_toc.asciidoc @@ -214,13 +214,16 @@ ifdef::backend-xhtml11[] // On resize $("#toc").resizable({ resize: function(e, ui) { - var tocWidth = $(toc).outerWidth(); - body.css({"marginLeft":parseInt(tocWidth)+20+"px"}); - $(".ui-resizable-e").css({"right":$(window).width()-parseInt(tocWidth)+"px"}); - $("#toctitle").css({"width":parseInt(tocWidth)-45+"px"}); + resized(); }, handles: 'e' }); + + // On zoom changed + $(window).resize(function() { + resized(); + }); + // Do it for the first time var tocWidth = $(toc).outerWidth(); @@ -229,6 +232,16 @@ ifdef::backend-xhtml11[] } + function resized(){ + var body = $("body"); + var tocWidth = $(toc).outerWidth(); + + body.css({"marginLeft":parseInt(tocWidth)+20+"px"}); + $(".ui-resizable-e").css({"right":$(window).width()-parseInt(tocWidth)+"px"}); + $("#toctitle").css({"width":parseInt(tocWidth)-45+"px"}); + } + + function toggleTOC(){ var isOpen = true; $( "#toggle" ).click(function() { -- cgit 1.2.3-korg From 02312c128a7827e420bcf50e53a72363e9be5a63 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Wed, 4 May 2016 10:47:19 +0300 Subject: David edits of trex_book.asciidoc --- 1 | 0 draft_trex_stateless-docinfo.html | 0 draft_trex_stateless.asciidoc | 0 draft_trex_stateless_moved1.asciidoc | 0 packet_builder_yaml.asciidoc | 0 trex_book.asciidoc | 502 +++++++++++++++++++---------------- trex_console.asciidoc | 0 trex_ga.asciidoc | 0 trex_stateless-docinfo.html | 0 vm_doc.asciidoc | 0 
waf1.css | 0 ws_main.py | 0 12 files changed, 276 insertions(+), 226 deletions(-) mode change 100644 => 100755 1 mode change 100644 => 100755 draft_trex_stateless-docinfo.html mode change 100644 => 100755 draft_trex_stateless.asciidoc mode change 100644 => 100755 draft_trex_stateless_moved1.asciidoc mode change 100644 => 100755 packet_builder_yaml.asciidoc mode change 100644 => 100755 trex_console.asciidoc mode change 100644 => 100755 trex_ga.asciidoc mode change 100644 => 100755 trex_stateless-docinfo.html mode change 100644 => 100755 vm_doc.asciidoc mode change 100644 => 100755 waf1.css mode change 100644 => 100755 ws_main.py diff --git a/1 b/1 old mode 100644 new mode 100755 diff --git a/draft_trex_stateless-docinfo.html b/draft_trex_stateless-docinfo.html old mode 100644 new mode 100755 diff --git a/draft_trex_stateless.asciidoc b/draft_trex_stateless.asciidoc old mode 100644 new mode 100755 diff --git a/draft_trex_stateless_moved1.asciidoc b/draft_trex_stateless_moved1.asciidoc old mode 100644 new mode 100755 diff --git a/packet_builder_yaml.asciidoc b/packet_builder_yaml.asciidoc old mode 100644 new mode 100755 diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 2326f3ec..ed45b92c 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -2,7 +2,7 @@ TRex ==== :author: hhaim :email: -:revnumber: 2.0 +:revnumber: 2.1 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex @@ -20,41 +20,44 @@ Traditionally, routers have been tested using commercial traffic generators, whi typically has been measured using packets per second (PPS) metrics. As router functionality and services have become more complex, stateful traffic generators have become necessary to provide more realistic application traffic scenarios. 
-The advantages of realistic traffic generators are: -* Providing more accurate performance numbers -* Finding real bottlenecks +Advantages of realistic traffic generators: + +* Accurate performance metrics +* Discovering bottlenecks in realistic traffic scenarios ==== Current Challenges: -* *Cost* : Commercial State-full traffic generators are expensive -* *Scale* : Bandwidth does not scale up well with features complexity -* *Standardization* : Lack of standardization of traffic patterns and methodologies -* *Flexibility* : Commercial tools do not allow agility when flexibility and changes are needed +* *Cost*: Commercial stateful traffic generators are expensive +* *Scale*: Bandwidth does not scale up well with feature complexity +* *Standardization*: Lack of standardization of traffic patterns and methodologies +* *Flexibility*: Commercial tools do not allow agility when flexibility and changes are needed ==== Implications -* High capital expenditure (capex) spent by different teams -* Testing in low scale and extrapolation became a common practice, it is not accurate, and hides real life bottlenecks and quality issues -* Different feature / platform teams benchmark and results methodology -* Delays in development and testing due to testing tools features dependency -* Resource and effort investment in developing different ad hoc tools and test methodologies +* High capital expenditure (capex) spent by different teams. +* Testing in low scale and extrapolation became a common practice. This is non-ideal and fails to indicate bottlenecks that appear in real-world scenarios. +* Teams use different benchmark methodologies, so results are not standardized. +* Delays in development and testing due to dependence on testing tool features. +* Resource and effort investment in developing different ad hoc tools and test methodologies. 
=== Overview of TRex -TRex addresses these problems through an innovative and extendable software implementation and by leveraging standard and open SW and x86/UCS HW. +TRex addresses these problems through an innovative and extendable software implementation and by leveraging standard and open software and x86/UCS hardware. -* Generates and analyzes L4-7 traffic and able to provide in one tool capabilities provided by commercial L7 tools. +* Generates and analyzes L4-7 traffic. In one package, provides capabilities of commercial L7 tools. * Stateful traffic generator based on pre-processing and smart replay of real traffic templates. * Generates and *amplifies* both client and server side traffic. * Customized functionality can be added. -* Scale to 200Gb/sec for one UCS ( using Intel 40Gb/sec NICS) +* Scales to 200Gb/sec for one UCS (using Intel 40Gb/sec NICs) * Low cost -* Virtual interfaces support, enable TRex to be used in a fully virtual environment without physical NICs and the following example use cases: +* Self-contained package that can be easily installed and deployed +* Virtual interface support enables TRex to be used in a fully virtual environment without physical NICs. Example use cases: ** Amazon AWS ** Cisco LaaS +// Which LaaS is this? Location as a service? Linux? ** TRex on your laptop -** Self-contained packaging that can be easily installed and deployed + .TRex Hardware @@ -66,14 +69,14 @@ TRex addresses these problems through an innovative and extendable software impl === Purpose of this guide -This guide explains the use of TRex internals and the use of TRex in conjunction with Cisco ASR1000 Series routers. The examples illustrate novel traffic generation techniques made possible by TRex. +This guide explains the use of TRex internals and the use of TRex together with Cisco ASR1000 Series routers. The examples illustrate novel traffic generation techniques made possible by TRex. 
== Download and installation -=== Hardware recommendation +=== Hardware recommendations TRex operates in a Linux application environment, interacting with Linux kernel modules. -TRex curretly works on x86 architecture and can operates well on Cisco UCS hardware. The following platforms have been tested and are recommended for operating TRex. +TRex curretly works on x86 architecture and can operate well on Cisco UCS hardware. The following platforms have been tested and are recommended for operating TRex. [NOTE] ===================================== @@ -82,85 +85,94 @@ TRex curretly works on x86 architecture and can operates well on Cisco UCS hardw [NOTE] ===================================== -Not all supported DPDK interfaces are supported by TRex + Not all supported DPDK interfaces are supported by TRex ===================================== -.Preferred UCS +.Preferred UCS hardware [options="header",cols="1,3"] |================= | UCS Type | Comments -| UCS C220 M3/M4 | *Prefered, Low-End*, Supports up to 40Gb/sec with 540-D2 and with newer Intel NIC 80Gb/sec with 1RU, recommended -| UCS C200| Early UCS model -| UCS C210 M2 | Supports up to 40Gb/sec PCIe3.0 -| UCS C240 M3/M4 | *Prefered, High-End* Supports up to 200Gb/sec. 6x XL710 NICS (PCIex8) or 2xFM10K (PCIex16) -| UCS C260M2 | Supports up to 30Gb/sec due to V2 PCIe. +| UCS C220 M3/M4 | *Preferred Low-End*. Supports up to 40Gb/sec with 540-D2. With newer Intel NIC (recommended), supports 80Gb/sec with 1RU. See table below describing components. +| UCS C200| Early UCS model. +| UCS C210 M2 | Supports up to 40Gb/sec PCIe3.0. +| UCS C240 M3/M4 | *Preferred, High-End* Supports up to 200Gb/sec. 6x XL710 NICS (PCIex8) or 2xFM10K (PCIex16). See table below describing components. +| UCS C260M2 | Supports up to 30Gb/sec (limited by V2 PCIe). 
|================= -.Internal Components Low-End C220M4 +.Low-End UCS C220 M4 - Internal components [options="header",cols="1,2",width="60%"] |================= | Components | Details -| CPU | 2x CPU E5-2620/2.0 GHz -| CPU Configuration | 2-Socket CPU configurations (can also work with one CPU) -| Memory | 2x4 banks for each CPU. Total of 8 BANKS ==> 32GB -| NO RAID | NO RAID +| CPU | 2x E5-2620 @ 2.0 GHz. +| CPU Configuration | 2-Socket CPU configurations (also works with 1 CPU). +| Memory | 2x4 banks f.or each CPU. Total of 32GB in 8 banks. +| RAID | No RAID. |================= -.Internal Components High-End C240M4 +.High-End C240 M4 - Internal components [options="header",cols="1,2",width="60%"] |================= | Components | Details -| CPU | 2x CPU E5-2667 /3.20 GHz -| PCIe | 1x ,Riser PCI expantion card option A PID UCSC-PCI-1A-240M4 this will give the option to have two PCIex16 -| CPU Configuration | 2-Socket CPU configurations (can also work with one CPU) -| Memory | 2x4 banks for each CPU. Total of 8 BANKS ==> 32GB -| NO RAID | NO RAID +| CPU | 2x E5-2667 @ 3.20 GHz. +| PCIe | 1x Riser PCI expansion card option A PID UCSC-PCI-1A-240M4 enables 2 PCIex16. +| CPU Configuration | 2-Socket CPU configurations (also works with 1 CPU). +| Memory | 2x4 banks for each CPU. Total of 32GB in 8 banks. +| RAID | No RAID. 
|================= -.Supported NICS +.Supported NICs [options="header",cols="1,1,2",width="50%"] |================= | Bandwidth | Chipset | Example | 1Gb/sec | Intel I350 | Intel 4x1GE 350-T4 NIC | 10Gb/sec | Intel 82599| Intel x520-D2 Cisco Order tool 2X Intel N2XX-AIPCI01, Intel X520 Dual Port 10Gb SFP+ Adapter -| 10Gb/sec | Intel X710 | SFP+, *Preferred* support per stream stats in hardware link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[PE310G4i71L] -| 40Gb/sec | Intel XL710 | QSFP+ (copper/optical) -| 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicom link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[PE3100G2DQiR_96] *under dev* +| 10Gb/sec | Intel X710 | link:https://en.wikipedia.org/wiki/Small_form-factor_pluggable_transceiver[SFP+], *Preferred* support per stream stats in hardware link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[Silicom PE310G4i71L] +| 40Gb/sec | Intel XL710 | link:https://en.wikipedia.org/wiki/QSFP[QSFP+] (copper/optical) +| 100Gb/sec | Intel Intel FM10420 | QSFP28, by Silicom link:http://www.silicom-usa.com/100_Gigabit_Dual_Port_Fiber_Ethernet_PCI_Express_PE3100G2DQiR_96[Silicom PE3100G2DQiR_96] (*in development*) | VMXNET / + -VMXNET3 (read notes) | VMware paravirtualize | connect using vmWare vSwitch -| E1000 | paravirtualize | vmWare/KVM/VirtualBox +VMXNET3 (see notes) | VMware paravirtualized | Connect using VMware vSwitch +| E1000 | paravirtualized | VMware/KVM/VirtualBox |================= -.X710 SFP+ support (*for Silicom PE310G4i71L with Open Optic*) +// in table above, is it correct to list "paravirtualized" as chipset? Also, what is QSFP28? It does not appear on the lined URL. Clarify: is Intel X710 the preferred NIC? 
+ +.Intel X710 SFP+ support (for link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[Silicom PE310G4i71L] with Open Optic) [options="header",cols="1,1",width="70%"] |================= -| SFP+ | Example -| Cisco SFP-10G-SR | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] -| Cisco SFP-10G-LR | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] -| Cisco SFP-H10GB-CU1M | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] -| Cisco SFP-10G-AOC1M | see link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[here] +| link:https://en.wikipedia.org/wiki/Small_form-factor_pluggable_transceiver[SFP+] | Notes +| Cisco SFP-10G-SR | link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Info] +| Cisco SFP-10G-LR | link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Info] +| Cisco SFP-H10GB-CU1M | link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Info] +| Cisco SFP-10G-AOC1M | link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Info] |================= -WARNING: Intel X710 NIC for example FH X710DA4FHBLK will work *only* with Intel SFP+. In case you like an open optic buy Silicom PE310G4i71L NIC +[NOTE] +===================================== + Intel X710 NIC (example: FH X710DA4FHBLK) operates *only* with Intel SFP+. 
For open optic, use the link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[Silicom PE310G4i71L] NIC. +===================================== + +// clarify above table and note -.XL710 QSFP+ support +.Intel XL710 QSFP+ support [options="header",cols="1,1",width="70%"] |================= -| QSFP+ | Example -| QSFP+ SR4 optics | APPROVED OPTICS For Intel NICS, Cisco QSFP-40G-SR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] -| QSFP+ LR-4 Optics | APPROVED OPTICS For Intel NICS , Cisco QSFP-40G-LR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] -| QSFP Active Optical Cables (AoC) | QSFP-H40G-AOC link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| link:https://en.wikipedia.org/wiki/QSFP[QSFP+] | Notes +| QSFP+ SR4 optics | APPROVED OPTICS For Intel NICS, Cisco QSFP-40G-SR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ LR-4 Optics | APPROVED OPTICS For Intel NICS , Cisco QSFP-40G-LR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP Active Optical Cables (AoC) | QSFP-H40G-AOC link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] | QSFP+ Intel Ethernet Modular Optics | | QSFP+ DA twin-ax cables | -| Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| Active QSFP+ Copper Cables | Cisco QSFP-4SFP10G-CU 
link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] |================= [NOTE] ===================================== - For Intel XL710 NICS, Cisco SR4/LR QSFP+ won't work you can buy Silicom with Open Optic + For Intel XL710 NICs, Cisco SR4/LR QSFP+ does not operate. Use Silicom with Open Optic. ===================================== +// clarify above table and note. let's discuss. + .FM10K QSFP28 support [options="header",cols="1,1",width="70%"] |================= @@ -168,72 +180,79 @@ WARNING: Intel X710 NIC for example FH X710DA4FHBLK will work *only* with Intel | todo | todo |================= +// do we want to show "todo"? maybe "pending" + [IMPORTANT] ===================================== -* For VMXNET3 use Ubuntu and *not* Fedora 18. Fedora 18 will crash. -* Intel SFP+ 10Gb/Sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. -* Using different NUMA for different NIC is very important when getting to high speeds, such as using several Intel XL710 40Gb/sec. + - One can verify NUMA and NIC topology with following command: lstopo (yum install hwloc) + - NUMAs-CPUs relation is determined with following command: lscpu + - See real example of NUMA usage xref:numa-example[here] -* Using Intel XL710 with Fedora 18 requires updating Kernel: -** > sudo yum update kernel -** > sudo yum update kernel-devel -** > sudo yum update kernel-headers -* For Intel XL710 NICs there is a need to verify the NVM is v4.42 or v4.53 see xref:xl710-firmware[here] for more info -** > sudo ./t-rex-64 -f cap2/dns.yaml -d 0 *-v 6* --nc | grep NVM + - PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + - PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + - PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc + - PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc +* For VMXNET3, use Ubuntu. Fedora 18 is not supported and causes crash. 
+* Intel SFP+ 10Gb/sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. +// above, replace "only one" with "only mode"? +* For operating high speed throughput (example: several Intel XL710 40Gb/sec), use different link:https://en.wikipedia.org/wiki/Non-uniform_memory_access[NUMA] nodes for different NICs. + + To verify NUMA and NIC topology: `lstopo (yum install hwloc)` + + To display CPU info, including NUMA node: `lscpu` + + NUMA usage xref:numa-example[example] +* Using Intel XL710 with Fedora 18 requires updating kernel: +** `> sudo yum update kernel` +** `> sudo yum update kernel-devel` +** `> sudo yum update kernel-headers` +* For Intel XL710 NICs, verify that the NVM is v4.42 or v4.53. xref:xl710-firmware[Info]. +** `> sudo ./t-rex-64 -f cap2/dns.yaml -d 0 *-v 6* --nc | grep NVM` + + `PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc` + + `PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc` + + `PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc` + + `PMD: FW 4.22 API 1.2 *NVM 04.04.02* eetrack 800013fc` ===================================== -.Sample order for low-end UCSC-C220-M3S with 4x10Gb ports -[options="header",cols="2,1^",width="50%"] +// above, maybe rename the bullet points "NIC usage notes"? should we create a subsection for NICs? Maybe it would be under "2.1 Hardware recommendations" as a subsection. 
+ + +.Sample order for recommended low-end Cisco UCSC-C220-M3S with 4x10Gb ports +[options="header",cols="1,1",width="70%"] +|================= +| Component | Quantity +| UCSC-C220-M3S | 1 +| UCS-CPU-E5-2650 | 2 +| UCS-MR-1X041RY-A | 8 +| A03-D500GC3 | 1 +| N2XX-AIPCI01 | 2 +| UCSC-PSU-650W | 1 +| SFS-250V-10A-IS | 1 +| UCSC-CMA1 | 1 +| UCSC-HS-C220M3 | 2 +| N20-BBLKD | 7 +| UCSC-PSU-BLKP | 1 +| UCSC-RAIL1 | 1 |================= -| Component | Amount -| UCSC-C220-M3S | 1 -| UCS-CPU-E5-2650 | 2 -| UCS-MR-1X041RY-A | 8 -| A03-D500GC3 | 1 -| N2XX-AIPCI01 | 2 -| UCSC-PSU-650W | 1 -| SFS-250V-10A-IS | 1 -| UCSC-CMA1 | 1 -| UCSC-HS-C220M3 | 2 -| N20-BBLKD | 7 -| UCSC-PSU-BLKP | 1 -| UCSC-RAIL1 | 1 -|======================== - -NOTE: You should buy seperatly the 10Gb/sec SFP+, Cisco would be fine with TRex (but not for plain Linux driver). - -=== Install OS + +// should table above say "low-end Cisco UCS C220 M3S" instead of "low-end USCS-C220-M3S"? + +NOTE: Purchase the 10Gb/sec SFP+ separately. Cisco would be fine with TRex (but not for plain Linux driver). +// does note above mean "TRex operates with 10Gb/sec SFP+ components, but plain Linux does not provide drivers."? if so, how does purchasing separately solve this? where do they get drivers? + +=== Installing OS ==== Supported versions -Fedora 18-20 , and Ubuntu 14.04.1 LTS are the Linux OS supported. -You should install the *64bit* Kernel version. -More 64bit OS could be supported by compiling the drivers. +Supported Linux versions: +* Fedora 18-20, 64-bit kernel (not 32-bit) +* Ubuntu 14.04.1 LTS, 64-bit kernel (not 32-bit) -WARNING: Only *64bit* Kernels are supported +NOTE: Additional OS version may be supported by compiling the necessary drivers. +// we should indicate exactly which drivers this means -To verify that your kernel is 64bit version try this +To check whether a kernel is 64-bit, verify that the ouput of the following command is `x86_64`. 
[source,bash] ---- $uname -m -x86_64 #<1> +x86_64 ---- -<1> x86_64 is the desired output - +==== Download Linux -==== Download ISO file - -The ISO images of the described Linux OS can be downloaded from the following links: +ISO images for supported Linux releases can be downloaded from: .Supported Linux ISO image links [options="header",cols="1^,2^",width="50%"] @@ -251,18 +270,20 @@ The ISO images of the described Linux OS can be downloaded from the following li | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] |====================================== -For Fedora, you can get link close to your location at: + +For Fedora downloads... + +* Select a mirror close to your location: + https://admin.fedoraproject.org/mirrormanager/mirrors/Fedora + Choose: "Fedora Linux http" -> releases -> -> Server -> x86_64 -> iso -> Fedora-Server-DVD-x86_64-.iso -Then, verify the checksum of the downloaded file matches the linked checksum values with the `sha256sum` command. For example: +* Verify the checksum of the downloaded file matches the linked checksum values with the `sha256sum` command. Example: [source,bash] ---- $sha256sum Fedora-18-x86_64-DVD.iso 91c5f0aca391acf76a047e284144f90d66d3d5f5dcd26b01f368a43236832c03 #<1> ---- -<1> Should be equal to the sha256 values described in the linked CHECKSUM files. +<1> Should be equal to the link:https://en.wikipedia.org/wiki/SHA-2[SHA-256] values described in the linked checksum files. ==== Install Linux @@ -271,12 +292,21 @@ Ask your lab admin to install the Linux using CIMC, assign an IP, and set the DN xref:fedora21_example[Example of installing Fedora 21 Server] -IMPORTANT: To use TRex, you should have sudo on this machine or root password. -WARNING: Upgrading the linux Kernel using `yum upgrade` require to build the TRex drivers. +[NOTE] +===================================== + * To use TRex, you should have sudo on the machine or the root password. 
+ * Upgrading the linux Kernel using `yum upgrade` requires building the TRex drivers. +===================================== ==== Verify Intel NIC installation -The following is an example of 4x10Gb/sec TRex with I350 management port and four x520-D2 (82599 chipset): +Use `lspci` to verify the NIC installation. + +Example 4x 10Gb/sec TRex configuration (see output below): + +* I350 management port + +* 4x Intel Ethernet Converged Network Adapter model x520-D2 (82599 chipset) [source,bash] ---- @@ -290,14 +320,15 @@ $[root@trex]lspci | grep Ethernet ---- <1> Management port <2> CIMC port -<3> 10Gb/sec traffic ports ( Intel 82599EB) +<3> 10Gb/sec traffic ports (Intel 82599EB) === Obtaining the TRex package -Connect by ssh to the TRex machine and do the following: +Connect by `ssh` to the TRex machine and execute the commands described below. -assuming *$WEB_URL* is *{web_server_url}* or *{local_web_server_url}* (cisco internal) +NOTE: Prerequisite: *$WEB_URL* is *{web_server_url}* or *{local_web_server_url}* (Cisco internal) +Latest release: [source,bash] ---- $mkdir trex @@ -307,7 +338,7 @@ $tar -xzvf latest ---- -to take the bleeding edge version +Bleeding edge version: [source,bash] ---- $wget --no-cache $WEB_URL/release/be_latest @@ -319,14 +350,14 @@ To obtain a specific version, do the following: $wget --no-cache $WEB_URL/release/vX.XX.tar.gz #<1> ---- -<1> X.XX = The version number +<1> X.XX = Version number === Running TRex for the first time in loopback -If you have 10Gb/sec TRex (based on Intel 520-D2 NICs) you can verify that it works correctly by loopback the ports. +If you have a 10Gb/sec TRex (based on Intel 520-D2 NICs), you can verify that it works correctly by loopback on the ports. You can install Intel SFP+ or Cisco SFP+, but you cannot connect ports that are on the same NIC to each other (it might not sync). -If you have only one NIC of 10gb/sec you cannot perform this test beacause the ports will not have valid link. 
-Another option for loopback is to use Cisco twinax copper cable see link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[here] +If you have only one NIC of 10gb/sec you cannot perform this test because the ports will not have a valid link. +Another option for loopback is to use link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[Cisco twinax copper cable]. //TBD: perhaps rephase, using a "Prerequisites" or "Required" heading. The requirement here would be: Two (2) 10gb/sec NICs //[hh] it is not accurate beacuse with 1Gb/sec you can have this test @@ -337,7 +368,9 @@ image:images/loopback_right.png[title="rigt"] .Wrong loopback image:images/loopback_wrong.png[title="rigt"] -In case you have 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC you can do anything you like from the loopback perspective *but* you must filter the management port before see xref:trex_config[here]. +If you have a 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC, you can do anything you like from the loopback perspective *but* first filter the management port - see xref:trex_config[TRex Configuration]. + +// above, clarify "you can do anything you like from the loopback perspective" ==== Identify the ports @@ -366,20 +399,22 @@ In case you have 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC you can do anything <3> TRex interface #3 before unbinding <4> TRex interface #4 before unbinding -Now choose the port you want to use and follow the next section by creating a configuration file. +Choose a port to use and follow instructions in the next section to create a configuration file. ==== Create minimum configuration file -Create a configuration file in `/etc/trex_cfg.yaml`. +Create a configuration file: `/etc/trex_cfg.yaml`. -You could copy a basic configuration file from cfg folder by running this command. +You can copy a basic configuration file from cfg folder by running this command... 
[source,bash] ---- $cp cfg/simple_cfg.yaml /etc/trex_cfg.yaml ---- -Now edit the configuration file with the right values from the previous section +...and edit the configuration file with the desired values. + +Example: [source,bash] ---- @@ -388,14 +423,16 @@ Now edit the configuration file with the right values from the previous section version : 2 #<2> interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] #<3> ---- -<1> the number of ports -<2> must add version 2 to the configuration file -<3> The list of interface from `#>sudo ./dpdk_setup_ports.py -s`, in this example it was taken +<1> Mumber of ports +<2> Must add version 2 to the configuration file +<3> List of interfaces displayed by `#>sudo ./dpdk_setup_ports.py -s` -When working with VM, you must set the destination mac of one port as the source or the other for loopback the port in the vSwitch +When working with a VM, set the destination MAC of one port as the source or the other for loopback the port in the vSwitch and you should take the right value from the hypervisor (in case of a physical NIC you can set the MAC address with virtual you can't and you should take it from the hypervisor) and example +// Clarify paragraph above. + [source,python] ---- - port_limit : 2 @@ -407,20 +444,21 @@ and example - dest_mac : [0x2,0x0,0x0,0x2,0x0,0x00] # port 1 <1> src_mac : [0x1,0x0,0x0,0x1,0x0,0x00] ---- -<1> Source mac is like destination mac (this should be set or taken from vmware). the mac was taken from hypervisor -<2> Currently TRex has a limitation and support only one type of NIC at a time. You can't mix different type of NIC in one config file. see here for more info link:http://trex-tgn.cisco.com/youtrack/issue/trex-197[trex-201] +<1> Source MAC is like destination MAC (this should be set or taken from VMware). The MAC was taken from the hypervisor. +<2> Currently TRex supports only one type of NIC at a time. You cannot mix different NIC types in one config file. 
For more info, see link:http://trex-tgn.cisco.com/youtrack/issue/trex-197[trex-201]. + +// where can we describe this limitation (TRex supports only one type of NIC at a time. You cannot mix different NIC types in one config file.) and other limitations? -==== Running TRex +==== Run TRex -Run this for 4x10Gb/sec TRex: +Use the following command to begin operation of a 4x 10Gb/sec TRex: [source,bash] ---- $sudo ./t-rex-64 -f cap2/dns.yaml -c 4 -m 1 -d 100 -l 1000 ---- -NOTE: For 10Gb/sec TRex with 2,6, or 8 ports, add --limit-ports [number of ports] *or* follow xref:trex_config[this] to configure the TRex. -//TBD: recommend bold for the 2 commands. +NOTE: For a 10Gb/sec TRex with 2, 6, or 8 ports, add `--limit-ports [number of ports]` *or* follow xref:trex_config[these instructions] to configure TRex. If successful, the output will be similar to the following: @@ -492,70 +530,73 @@ zmq publisher at: tcp://*:4500 <3> Total Rx must be the same as Tx <4> Tx_ok == Rx_ok <5> Tx_ok == Rx_ok -<6> Number of TRex active "flows". Could be diffrent than the Router flows due to aging issues. Usualy TRex number of active flows is much lower that router. +<6> Number of TRex active "flows". Could be different than the number of router flows, due to aging issues. Usualy the TRex number of active flows is much lower that of the router. <7> Number of TRex flows from startup. <8> Drop rate. -<9> Expected Packet Per Second (without the latency packets). -<10> Expected Connection Per Second (without the latency packets). -<11> Expected Bit Per Second (without the latency packets). +<9> Expected packets per second (calculated without latency packets). +<10> Expected connections per second (calculated without latency packets). +<11> Expected bits per second (calculated without latency packets). <12> Average CPU utilization of transmitters threads. For best results it should be lower than 80%. -<13> Gb/sec generated per core of DP. Higer is better. +<13> Gb/sec generated per core of DP. 
Higher is better. <14> Rx and latency thread CPU utilization. -More statistic information: +More statistics information: -*socket*:: same as the active flows. +*socket*:: Same as the active flows. -*Socket/Clients*:: is equal active_flows/#clients, average of active flow per client. +*Socket/Clients*:: Average of active flows per client, calculated as active_flows/#clients. -*Socket-util*:: is equal to ~(100*active_flows/#clients)/64K equal to (average active flows per client*100/64K ) in words, it give an estimation of how many socket ports are used per client IP. Utilization of more than 50% means that TRex is generating too many flows per one client and you need to add more clients. +*Socket-util*:: Estimate of how many socket ports are used per client IP. This is approximately ~(100*active_flows/#clients)/64K, calculated as (average active flows per client*100/64K). Utilization of more than 50% means that TRex is generating too many flows per single client, and that more clients must be added. +// clarify above, especially the formula -*Max window*:: shows a momentary maximum latency for a time window of 500msec. There are a few numbers per number of windows that are shown. +*Max window*:: Momentary maximum latency for a time window of 500 msec. There are a few numbers per number of windows that are shown. The new number (the last 500msec) is the right number. The oldest in the left number. This can help to identify spikes of high latency that after a time clear.in a contrast the maximum latency will stuck at the maximum value for all the test. +//clarify above -*Platform_factor*:: There are cases that we duplicate the traffic using splitter/Switch and we would like all the number to be multiplied by this factor (e.g. x2) - +*Platform_factor*:: There are cases that we duplicate the traffic using splitter/switch and we would like all the number to be multiplied by this factor (e.g. 
x2) +//clarify above WARNING: If you don't see rx packets, revisit your MAC address configuration. +//clarify above ==== Running TRex for the first time with ESXi: -* Virtual NICs can be used to bridge between TRex and non-supported NICs or get some basic impression/testing. Bandwidth is limited by vSwitch, has ipv6 issues. +* Virtual NICs can be used to bridge between TRex and non-supported NICs, or for basic testing. Bandwidth is limited by vSwitch, has IPv6 issues. +// clarify, especially what IPv6 issues -1. Click on the host machine, enter Configuration -> Networking. +1. Click the host machine, enter Configuration -> Networking. -a. One of the NICs should be connected to the main vSwitch network to get "outside" connection, for the TRex client and ssh: + +a. One of the NICs should be connected to the main vSwitch network to get an "outside" connection, for the TRex client and ssh: + image:images/vSwitch_main.png[title="vSwitch_main"] b. Other NICs that are used for TRex traffic should be in distinguish vSwitch: + image:images/vSwitch_loopback.png[title="vSwitch_loopback"] -2. Right click on guest machine -> Edit settings -> Ensure the NICs are set to their networks: + +2. Right-click guest machine -> Edit settings -> Ensure the NICs are set to their networks: + image:images/vSwitch_networks.png[title="vSwitch_networks"] [NOTE] ===================================================================== -Current limitation: following command will not work as excepted: +Current limitation: The following command does not function as expected: .... sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 .... -vSwitch can't know where to "route" the packet, it supposed to be fixed once TRex supports ARP +The vSwitch does not "know" where to route the packet. This is expected to be fixed when TRex supports ARP. ===================================================================== -* Pass-through is the way to use directly the NICs from host machine inside the VM. 
Has no limitations except the NIC/hardware itself. The only difference via bare-metal OS is seldom spikes of latency (~10ms). Passthrough settings can't be saved to OVA. +* Pass-through is the way to use directly the NICs from host machine inside the VM. Has no limitations except the NIC/hardware itself. The only difference via bare-metal OS is occasional spikes of latency (~10ms). Passthrough settings cannot be saved to OVA. -1. Click on the host machine, enter Configuration -> Advanced settings -> Edit. Mark the wanted NICs. Reboot the ESXi to apply. + +1. Click on the host machine. Enter Configuration -> Advanced settings -> Edit. Mark the desired NICs. Reboot the ESXi to apply. + image:images/passthrough_marking.png[title="passthrough_marking"] -2. Right click on guest machine -> Edit settings -> Add -> *PCI device* -> Choose the NICs one by one. + +2. Right click on guest machine. Edit settings -> Add -> *PCI device* -> Choose the NICs one by one. + image:images/passthrough_adding.png[title="passthrough_adding"] ==== Running TRex for the first time with router You can follow this presentation: link:trex_config_guide.html[first time TRex configuration] -//TBD: Note that the link does not work correctly in PDF rendition or continue reading. Without config file, TRex sets source MAC of all ports to `00:00:00:01:00:00` and expects to receive packets with this destination MAC address. So, you just need to configure your router with static ARP entry pointing to the above MAC address. @@ -569,9 +610,9 @@ include::trex_book_basic.asciidoc[] === VLAN Trunk support anchor:trex_valn[] -The VLAN Trunk TRex feature attempts to solve the router port bandwidth limitation when the traffic profile is asymmetric. Example: SFR profile is asymmetric and was the first usecase. +The VLAN Trunk TRex feature attempts to solve the router port bandwidth limitation when the traffic profile is asymmetric. Example: Asymmetric SFR profile. 
This feature converts asymmetric traffic to symmetric, from the port perspective, using router sub-interfaces. -This feature requires TRex to send the traffic on two VLANs. The following describes how this works. +This requires TRex to send the traffic on two VLANs, as described below. .YAML format [source,python] @@ -586,18 +627,18 @@ This feature requires TRex to send the traffic on two VLANs. The following descr - duration : 0.1 vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } <1> ---- -<1> enable VLAN feature , valn0==100 , valn1==200 +<1> Enable VLAN feature, valn0==100 , valn1==200 *Problem definition:*:: -Assuming a TRex with two ports and an SFR traffic profile. +Scenario: TRex with two ports and an SFR traffic profile. .Without VLAN/sub interfaces [source,python] ---- 0 ( client) -> [ ] - 1 ( server) ---- -Without VLAN support it is not symmetric. From port 0 (client side), it sends 10%, from and port 1 (server) sends 90%. Port 1 become the bottlneck (10Gb/s limit) before port 0 +Without VLAN support the traffic is asymmetric. 10% of the traffic is sent from port 0 (client side), 90% is from port 1 (server). Port 1 become the bottlneck (10Gb/s limit) before port 0. .With VLAN/sub interfaces [source,python] @@ -606,7 +647,7 @@ port 0 ( client VLAN0) <-> | | <-> port 1 ( server-VLAN0) port 0 ( server VLAN1) <-> | | <-> port 1 ( client-VLAN1) ---- -In this case both ports will have the same amount of traffic. +In this case both ports have the same amount of traffic. *Router configuation:*:: [source,python] @@ -667,18 +708,20 @@ In this case both ports will have the same amount of traffic. set ip next-hop 11.88.11.12 ! ---- -<1> Disable the IP on the main port it is important +<1> Disable the IP on the main port it is important. 
+// above, clarify what's important <2> Enable VLAN1 <3> PBR configuration <4> Enable VLAN2 <5> PBR configuration -<6> TRex MAC-address destination port +<6> TRex destination port MAC address <7> PBR configuration rules === Static source MAC address setting With this feature, TRex replaces the source MAC address with the client IP address. -Note: This feature was requested by the Cisco ISG group. + + Note: This feature was requested by the Cisco ISG group. *YAML:*:: @@ -694,18 +737,17 @@ Note: This feature was requested by the Cisco ISG group. .. mac_override_by_ip : true <1> ---- -<1> In this case, the client side MAC address will be look like this: +<1> In this case, the client side MAC address looks like this: SRC_MAC = IPV4(IP) + 00:00 -=== IPv6 support ( `--ipv6`); +=== IPv6 support (`--ipv6`) Support for IPv6 includes: 1. Support for pcap files containing IPv6 packets 2. Ability to generate IPv6 traffic from pcap files containing IPv4 packets The following switch enables this feature: `--ipv6` -Two new keywords (src_ipv6, dst_ipv6) have been added to the YAML -file to specify the most significant 96-bits of the IPv6 address - for example: +Two new keywords (`src_ipv6`, `dst_ipv6`) have been added to the YAML file to specify the most significant 96 bits of the IPv6 address - for example: [source,python] ---- @@ -720,9 +762,9 @@ If src_ipv6 and dst_ipv6 are not specified in the YAML file, the default is to form IPv4-compatible addresses (where the most signifcant 96-bits are zero). -There is a support for all plugins (control flows that needed to be change). +There is a support for all plugins (control flows that needed to be changed). -*An example:*:: +*Example:*:: [source,bash] ---- $sudo ./t-rex-64 -f cap2l/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 --ipv6 @@ -730,7 +772,7 @@ $sudo ./t-rex-64 -f cap2l/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 - *Limitations:*:: -* TRex cannot generate both IPv4 and IPv6 traffic. 
The --ipv6 switch must be specified even when using a pcap file containing only IPv6 packets +* TRex cannot generate both IPv4 and IPv6 traffic. The `--ipv6` switch must be specified even when using a pcap file containing only IPv6 packets. *Router configuration:*:: @@ -765,20 +807,18 @@ route-map ipv6_p2_to_p1 permit 10 asr1k(config)#ipv6 route 4000::/64 2001::2 asr1k(config)#ipv6 route 5000::/64 3001::2 ---- -<1> enable ipv6 -<2> add pbr -<3> enable ipv6 routing -<4> mac-addr setting should be like TRex +<1> Enable IPv6 +<2> Add pbr +<3> Enable IPv6 routing +<4> MAC address setting should be like TRex <5> PBR configuraion -=== Source MAC-address mapping using a file +=== Source MAC address mapping using a file -Extending the source MAC-address replacment capability. -It is possible to have a mapping betwean IPv4->MAC using the new `--mac` CLI switch -file format is YAML. +Extends the source MAC address replacment capability. Enables mapping between IPv4->MAC using the new `--mac` CLI switch. The file format is YAML. -*An example:*:: +*Example:*:: [source,bash] ---- $sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --mac cap2/test_example.yaml @@ -797,22 +837,22 @@ $sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --ma *Limitations:*:: -. It is assumed that most of the clients has MAC addrees. at least 90% of the IP should have a MAC addrees mapping. +. It is assumed that most clients have a MAC address. At least 90% of IPs should have MAC address mapping. -=== Destination mac address spreadings anchor:mac_spread[] +=== Destination MAC address spreading anchor:mac_spread[] -Using this option, one can send traffic to few destination devices. In normal mode all the packets are sent to the port destination mac-address. -to enable this option add `--mac-spread` to the command line. +Using this option, one can send traffic to few destination devices. 
In normal mode, all packets are sent to the port destination MAC address. +To enable this option, add `--mac-spread` to the command line. -example: +Example: [source,bash] ---- $sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -m 1000 -c 4 -l 100 --mac-spread 2 ---- -In this case TRex will send to port destination mac and port destination mac +1 -using a switch you could connect TRex to a few DUT. -All the DUTs should return the traffic only to right port source address +In this example, TRex sends to port destination MAC and port destination MAC +1. Using a switch, you can connect TRex to multiple devices under test (DUTs). +All of the DUTs return the traffic only to the correct port source address. +// above, i removed "should" - verify accuracy [source,bash] ---- @@ -830,14 +870,14 @@ TRex(0) -| |-TRex(1) === NAT support TRex can learn dynamic NAT/PAT translation. To enable this feature add `--learn-mode ` to the command line. -In order to learn the NAT translation, TRex must embed information describing the flow a packet belongs to, in the first +To learn the NAT translation, TRex must embed information describing the flow a packet belongs to, in the first packet of each flow. This can be done in two different methods, depending on the chosen . *mode 1:*:: Flow info is embedded in the ACK of the first TCP SYN. -In this mode, there is a limitation that bidirectional UDP templates (e.g. DNS) are not supported. -This mode was developed for testing NAT with firewalls (which usually can't work with mode 2). +In this mode, there is a limitation that bidirectional UDP templates (for example, DNS) are not supported. +This mode was developed for testing NAT with firewalls (which usually do not work with mode 2). *mode 2:*:: @@ -875,13 +915,13 @@ $sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 10000 <1> The number of translations with timeout should be zero. Usually this occurs when the router drops the flow due to NAT. 
<2> Translation not found. This can occur when there is large latency in the router input/output queue. <3> Active number of TRex traslation flows, should be low in the case of low RTT. -<4> A total of TRex translation. May be different from the total number of flows in case template is uni-directional (and such does not need translation). +<4> A total of TRex translation. May be different from the total number of flows if template is uni-directional (and consequently does not need translation). *Configuration for Cisco ASR1000 Series:*:: -The feature was tested with the following configuration and sfr_delay_10_1g_no_bundeling. yaml traffic profile. -Clients address range is 16.0.0.1-16.0.0.255 +This feature was tested with the following configuration and sfr_delay_10_1g_no_bundeling. yaml traffic profile. +Client address range is 16.0.0.1 to 16.0.0.255 [source,python] ---- @@ -910,39 +950,42 @@ access-list 7 permit 16.0.0.0 0.0.0.255 <5> ip nat inside source list 8 pool my overload <6> access-list 8 permit 17.0.0.0 0.0.0.255 ---- -<1> Should be connected to TRex Client port (router inside port) +<1> Must be connected to TRex Client port (router inside port) <2> NAT inside <3> NAT outside <4> Pool of outside address with overload -<5> Should match TRex YAML client range -<6> In case of dual port TRex. +<5> Match TRex YAML client range +<6> In case of dual port TRex + +// verify 1 and 5 above; rephrased *Limitations:*:: . The IPv6-IPv6 NAT feature does not exist on routers, so this feature can work on IPv4 only. . Does not support NAT64. -. Bundeling/plugin support is not fully supported. This means that sfr_delay_10.yaml can't work.Use sfr_delay_10_no_bundeling.yaml instead. +. Bundling/plugin support is not fully supported. Consequently, sfr_delay_10.yaml does not work. Use sfr_delay_10_no_bundeling.yaml instead. +// verify file name "sfr_delay_10_no_bundeling.yaml" above. 
english spelling is bundling but maybe the filename has the "e" [NOTE] ===================================================================== -* `--learn-verify` is a debug TRex mechanism for testing the TRex learn mechanism. +* `--learn-verify` is a TRex debug mechanism for testing the TRex learn mechanism. * If the router is configured without NAT, it will verify that the inside_ip==outside_ip and inside_port==outside_port. ===================================================================== === Flow order/latency verification ( `--rx-check` ) -In normal mode (without this feature enabled), received traffic is not checked by software. It is only counted by hardware (Intel NIC) for drop packets verification at the end of the test. The only exception is the Latency/Jitter packets. -This is one of the reasons that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). -To enable this feature, you should add `--rx-check ` to the command line options, where sample is the sample rate. -1/sample of the flows will be sent to the software for verification. For 40Gb/Sec traffic you can use a sample of 1/128. Watch for Rx CPU% utilization. +In normal mode (without this feature enabled), received traffic is not checked by software. Hardware (Intel NIC) testin for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. +This is one reason that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). +To enable this feature, add `--rx-check ` to the command line options, where is the sample rate. +The number of flows that will be sent to the software for verification is (1/(sample_rate). For 40Gb/sec traffic you can use a sample rate of 1/128. Watch for Rx CPU% utilization. -INFO : This feature changes the TTL of the sampled flows to 255 and expects to get packets with TTL 254 or 255 (one routing hop). 
If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop is possible if there are number of routers betwean TRex client side and TRex server side. + INFO: This feature changes the TTL of the sampled flows to 255 and expects to receive packets with TTL 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop is possible if there are number of routers betwean TRex client side and TRex server side. -With this feature enabled, you can verify that: +This feature ensures that: -* Packets get out of DUT in order (from each flow perspective) -* There are no packet drops (No need to wait for the end of the test). Without this flag, you must wait for the end of the test in order to identify packet drops, because there is always a difference between TX and Rx, due to RTT. +* Packets get out of DUT in order (from each flow perspective). +* There are no packet drops (no need to wait for the end of the test). Without this flag, you must wait for the end of the test in order to identify packet drops, because there is always a difference between TX and Rx, due to RTT. .Full example @@ -969,14 +1012,14 @@ Cpu Utilization : 0.1 % active flows: <6> 10, fif: <5> 308, drop: 0, errors: 0 <4> ------------------------------------------------------------------------------------------- ---- -<1> CPU% of the Rx thread. If it is too high *increase* the sample rate. +<1> CPU% of the Rx thread. If it is too high, *increase* the sample rate. <2> Rx Check section. For more detailed info, press 'r' during the test or at the end of the test. <3> Average latency, max latency, jitter on the template flows in microseconds. This is usually *higher* than the latency check packet because the feature works more on this packet. <4> Drop counters and errors counter should be zero. If not, press 'r' to see the full report or view the report at the end of the test. 
-<5> First in flow (fif)- number of new flows handled by rx thread +<5> fif - First in flow. Number of new flows handled by the Rx thread. <6> active flows - number of active flows handled by rx thread -.Full report by pressing 'r' +.Press R to Display Full Report [source,python] ---- m_total_rx : 2 @@ -991,12 +1034,12 @@ Cpu Utilization : 0.1 % cnt : 2 high_cnt : 2 max_d_time : 1041 usec - sliding_average : 1 usec <3> + sliding_average : 1 usec <2> precent : 100.0 % histogram ----------- h[1000] : 2 - tempate_id_ 0 , errors: 0, jitter: 61 <2> + tempate_id_ 0 , errors: 0, jitter: 61 <3> tempate_id_ 1 , errors: 0, jitter: 0 tempate_id_ 2 , errors: 0, jitter: 0 tempate_id_ 3 , errors: 0, jitter: 0 @@ -1019,16 +1062,19 @@ Cpu Utilization : 0.1 % m_st_stop : 1 m_st_handle : 0 ---- -<1> Any errors shown here -<2> Error per template info -<3> low pass filter on the active average of latency events +<1> Errors, if any, shown here +<2> Low pass filter on the active average of latency events +<3> Error per template info + +// IGNORE: this line added to help rendition. Without this line, the "Notes and Limitations" section below does not appear. -*Limitation:*:: +*Notes and Limitations:*:: -** This feature must be enabled with a latency check (-l). +** This feature must be enabled with a latency check (`-l`). ** To receive the packets TRex does the following: -*** Changes the TTL to 0xff and expects 0xFF (loopback) or oxFE (route). ( use --hop to tune this number) -*** Adds 24 bytes of metadata as ipv4/ipv6 option header +*** Changes the TTL to 0xff and expects 0xFF (loopback) or oxFE (route). (Use `--hop` to configure this value.) +*** Adds 24 bytes of metadata as ipv4/ipv6 option header. +// clarify "ipv4/ipv6 option header" above == Reference @@ -1057,16 +1103,20 @@ Cpu Utilization : 0.1 % cap_ipg_min : 30 <5> cap_override_ipg : 200 <6> ---- -<1> Duration of the test (seconds). Can override using the `-d` option. +<1> Test duration (seconds). 
Can override using the `-d` option. <2> See the generator section. +// what does note 2 mean? see somewhere else? isn't this simply the generator section? <3> Default source/destination MAC address. The configuration file can override the defaults. <4> TRUE indicates that the IPG is taken from pcap file. -<5> The following two options can set the min ipg in microseconds: ( if (pkt_ipg The following two options can set the min ipg in microseconds: (if (pkt_ipg Value to override (microseconds). -<7> Enable valn feature. See xref:trex_valn[here] for info. -<8> Enable MAC address replacement by Client IP. +// in note 6, clarify "override" +<7> Enable valn feature. See xref:trex_valn[trex_valn section] for info. +<8> Enable MAC address replacement by client IP. ==== Per template section +// clarify "per template" [source,python] ---- @@ -1079,7 +1129,7 @@ Cpu Utilization : 0.1 % one_app_server : true <7> ---- -<1> The name of the template pcap file. It can be relative to the t-rex-64 image or absolute path. The pcap file can include one flow. (Exception: in case of plug-ins). +<1> The name of the template pcap file. Can be a relative path, based on the t-rex-64 image directory, or an absolute path. The pcap file can include one flow. (Exception: in case of plug-ins). <2> Connection per second for m==1 <3> If the global section of the YAML file does not include `cap_ipg : true`, this line sets the inter-packet gap in microseconds. <4> Should be set to the same value as ipg (microseconds). 
diff --git a/trex_console.asciidoc b/trex_console.asciidoc old mode 100644 new mode 100755 diff --git a/trex_ga.asciidoc b/trex_ga.asciidoc old mode 100644 new mode 100755 diff --git a/trex_stateless-docinfo.html b/trex_stateless-docinfo.html old mode 100644 new mode 100755 diff --git a/vm_doc.asciidoc b/vm_doc.asciidoc old mode 100644 new mode 100755 diff --git a/waf1.css b/waf1.css old mode 100644 new mode 100755 diff --git a/ws_main.py b/ws_main.py old mode 100644 new mode 100755 -- cgit 1.2.3-korg From 841fe33955f1262927dcc05b182640aaa8903231 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Sun, 8 May 2016 22:35:37 +0300 Subject: images updated --- trex_book.asciidoc | 8 ++++---- trex_stateless.asciidoc | 31 ++++++++++++++++++++----------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index ed45b92c..fbcaf260 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -363,10 +363,10 @@ Another option for loopback is to use link:http://www.fiberopticshare.com/tag/ci //[hh] it is not accurate beacuse with 1Gb/sec you can have this test .Correct loopback -image:images/loopback_right.png[title="rigt"] +image:images/loopback_right.png[title="Correct Loopback"] .Wrong loopback -image:images/loopback_wrong.png[title="rigt"] +image:images/loopback_wrong.png[title="Wrong Loopback"] If you have a 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC, you can do anything you like from the loopback perspective *but* first filter the management port - see xref:trex_config[TRex Configuration]. @@ -975,7 +975,7 @@ access-list 8 permit 17.0.0.0 0.0.0.255 === Flow order/latency verification ( `--rx-check` ) -In normal mode (without this feature enabled), received traffic is not checked by software. Hardware (Intel NIC) testin for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. +In normal mode (without this feature enabled), received traffic is not checked by software. 
Hardware (Intel NIC) testin for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. This is one reason that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). To enable this feature, add `--rx-check ` to the command line options, where is the sample rate. The number of flows that will be sent to the software for verification is (1/(sample_rate). For 40Gb/sec traffic you can use a sample rate of 1/128. Watch for Rx CPU% utilization. @@ -1116,7 +1116,7 @@ Cpu Utilization : 0.1 % <8> Enable MAC address replacement by client IP. ==== Per template section -// clarify "per template" +// clarify "per template" [source,python] ---- diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 303de5c5..41ade63e 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -11,11 +11,13 @@ TRex Stateless support :github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples :toclevels: 6 +// PDF version - image width variable ifdef::backend-docbook[] :p_width: 450 :p_width_1: 200 endif::backend-docbook[] +// HTML version - image width variable ifdef::backend-xhtml11[] :p_width: 800 :p_width_1: 400 @@ -111,7 +113,8 @@ A JSON-RPC2 thread in the TRex control plane core provides support for interacti // RPC = Remote Procedure Call, alternative to REST? --YES, no change -image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] +image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] +// OBSOLETE: image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] // Is there a big picture that would help to make the next 11 bullet points flow with clear logic? 
--explanation of the figure @@ -142,7 +145,7 @@ image::images/trex_2_stateless.png[title="RPC Server Components",align="left",wi * A client syncs with the TRex server to get the state in connection time, and caches the server information locally after the state has changed. * If a client crashes or exits, it syncs again after reconnecting. -image::images/trex_stateless_multi_user.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user.png"] +image::images/trex_stateless_multi_user.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user_02.png"] For details about the TRex RPC server, see the link:trex_rpc_server_spec.html[RPC specification]. @@ -158,7 +161,7 @@ This Architecture provides the following advantages: // maybe call it "Objects" in title and figure caption -image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_width_1}, link="images/stateless_objects.png"] +image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_width_1}, link="images/stateless_objects_02.png"] * *TRex*: Each TRex instance supports numerous interfaces. // "one or more"? @@ -640,7 +643,8 @@ Python API library: `automation/trex_control_plane/stl/trex_stl_lib`. The TRex console uses the Python API library to interact with the TRex server using the JSON-RPC2 protocol over ZMQ. 
-image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] +image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] +// OBSOLETE: image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] *File*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] @@ -1265,7 +1269,8 @@ The following example demonstrates 3 streams with different rates (10, 20, 40 PP *Output*:: The folowing figure present the output -image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_inter.png"] +image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_interleaving_01.png"] +// OBSOLETE: image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_inter.png"] *Discussion*:: * Stream #1 @@ -1396,7 +1401,8 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 <2> Multi-burst of 5 bursts of 4 packets with an inter-burst gap of 1 second. -image::images/stl_tut_4.png[title="Streams example",align="left",width={p_width}, link="images/stl_tut_4.png"] +image::images/stl_tut_4.png[title="Example: Multiple Streams",align="left",width={p_width}, link="images/stl_multiple_streams_01.png"] +// OBSOLETE: image::images/stl_tut_4.png[title="Example: Multiple Streams",align="left",width={p_width}, link="images/stl_tut_4.png"] ==== Tutorial: Loops of streams @@ -1802,11 +1808,12 @@ class STLS1(object): For more information how to define headers see link:http://www.secdev.org/projects/scapy/doc/build_dissect.html[Adding new protocols] in the Scapy documentation. 
-==== Tutorial: Field Engine, many clients +==== Tutorial: Field Engine, Multiple Clients The following example generates traffic from many clients with different IP/MAC addresses to one server. -image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] +image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_multiple_clients_01.png"] +// OBSOLETEimage::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] 1. Send a gratuitous ARP from B->D with server IP/MAC (58.55.1.1). 2. DUT learns the ARP of server IP/MAC (58.55.1.1). @@ -2206,7 +2213,7 @@ The following example creates a stream with no packets. The example uses the int This method can create loops like the following: -image::images/stl_null_stream.png[title="Null stream",align="left",width={p_width/2}, link="images/stl_null_stream.png"] +image::images/stl_null_stream.png[title="Null stream",align="left",width={p_width/2}, link="images/stl_null_stream_02.png"] 1. S1 - Sends a burst of packets, then proceed to stream NULL. 2. NULL - Waits the inter-stream gap (ISG) time, then proceed to S1. @@ -2217,11 +2224,13 @@ Null stream configuration: 2. Number of packets: 0 -==== Tutorial: Field Engine, Barrier stream (Split) +==== Tutorial: Field Engine, Stream Barrier (Split) *(Future Feature - not yet implemented)* -image::images/stl_barrier.png[title="Barrier Stream",align="left",width={p_width}, link="images/stl_barrier.png"] +image::images/stl_barrier.png[title="Stream Barrier",align="left",width={p_width}, link="images/stl_barrier_02.png"] + +image::images/stl_barrier.png[title="Stream Barrier",align="left",width={p_width}, link="images/stl_barrier_03.png"] In some cases there is a need to split the streams to thread in a way that specific stream will continue only after all the threads pass the same path. 
In the above figure we would like to that stream S3 will start on all the thread after S2 was finished by all the threads -- cgit 1.2.3-korg From ca80fa5b644fbdfe8ed6549d95c537a468073bed Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Sun, 8 May 2016 22:57:01 +0300 Subject: troubleshooting images --- trex_stateless.asciidoc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 41ade63e..ed313f7a 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -114,6 +114,7 @@ A JSON-RPC2 thread in the TRex control plane core provides support for interacti // RPC = Remote Procedure Call, alternative to REST? --YES, no change image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] + // OBSOLETE: image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] // Is there a big picture that would help to make the next 11 bullet points flow with clear logic? --explanation of the figure @@ -1267,9 +1268,10 @@ The following example demonstrates 3 streams with different rates (10, 20, 40 PP *Output*:: -The folowing figure present the output +The folowing figure presents the output. 
image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_interleaving_01.png"] + // OBSOLETE: image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_inter.png"] *Discussion*:: -- cgit 1.2.3-korg From 52dc8d3c7d6e4065855ae8dc6153b58d80ecfa66 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Sun, 8 May 2016 23:01:53 +0300 Subject: troubleshooting images --- trex_stateless.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index ed313f7a..67306525 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -1272,7 +1272,6 @@ The folowing figure presents the output. image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_interleaving_01.png"] -// OBSOLETE: image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_inter.png"] *Discussion*:: * Stream #1 -- cgit 1.2.3-korg From 72907246a08c1b77dba5607b730a4f77a38e6783 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Mon, 9 May 2016 00:44:12 +0300 Subject: troubleshooting images --- trex_stateless.asciidoc | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 67306525..dd9a2cf6 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -66,7 +66,7 @@ For information, see the link:trex_manual.html[manual], especially the material The following example shows three streams configured for Continuous, Burst, and Multi-burst traffic. 
-image::images/stl_streams_example.png[title="Stream example",align="left",width={p_width}, link="images/stl_streams_example.png"] +image::images/stl_streams_example_02.png[title="Multiple stream example",align="left",width={p_width}, link="images/stl_streams_example_02.png"] ==== High level functionality - near future @@ -113,7 +113,7 @@ A JSON-RPC2 thread in the TRex control plane core provides support for interacti // RPC = Remote Procedure Call, alternative to REST? --YES, no change -image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] +image::images/trex_architecture_01.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] // OBSOLETE: image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] @@ -146,7 +146,7 @@ image::images/trex_2_stateless.png[title="RPC Server Components",align="left",wi * A client syncs with the TRex server to get the state in connection time, and caches the server information locally after the state has changed. * If a client crashes or exits, it syncs again after reconnecting. -image::images/trex_stateless_multi_user.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user_02.png"] +image::images/trex_stateless_multi_user_02.png[title="Multiple users, per interface",align="left",width={p_width}, link="images/trex_stateless_multi_user_02.png"] For details about the TRex RPC server, see the link:trex_rpc_server_spec.html[RPC specification]. 
@@ -162,7 +162,7 @@ This Architecture provides the following advantages: // maybe call it "Objects" in title and figure caption -image::images/stateless_objects.png[title="TRex Objects",align="left",width={p_width_1}, link="images/stateless_objects_02.png"] +image::images/stateless_objects_02.png[title="TRex Objects",align="left",width={p_width_1}, link="images/stateless_objects_02.png"] * *TRex*: Each TRex instance supports numerous interfaces. // "one or more"? @@ -497,7 +497,7 @@ In this example all the packets will be routed to `TenGigabitEthernet0/1/0` port return [ STLStream( packet = pkt,mode = STLTXCont()) ] ---- -<1> This use of the `direction` flag here causes a different packet to be sent for each direction. +<1> This use of the `direction` flag causes a different packet to be sent for each direction. ==== Tutorial: Connect from a remote server @@ -644,7 +644,8 @@ Python API library: `automation/trex_control_plane/stl/trex_stl_lib`. The TRex console uses the Python API library to interact with the TRex server using the JSON-RPC2 protocol over ZMQ. 
-image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] +image::images/trex_architecture_01.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_architecture_01.png"] + // OBSOLETE: image::images/trex_2_stateless.png[title="RPC Server Components",align="left",width={p_width}, link="images/trex_2_stateless.png"] *File*:: link:{github_stl_examples_path}/stl_bi_dir_flows.py[stl_bi_dir_flows.py] @@ -1024,7 +1025,7 @@ $ ./stl-sim -f stl/udp_1pkt_simple.py -o b.pcap -l 10 Contents of the output pcap file produced by the simulator in the previous step: -image::images/stl_tut_1.png[title="TRex simulator output stored in pcap file",align="left",width={p_width}, link="images/stl_tut_1.png.png"] +image::images/stl_tut_1.png[title="TRex simulator output stored in pcap file",align="left",width={p_width}, link="images/stl_tut_1.png"] Adding `--json` displays the details of the JSON command for adding a stream: @@ -1266,12 +1267,11 @@ The following example demonstrates 3 streams with different rates (10, 20, 40 PP <3> Defines streams with rate of 20 PPS. <4> Defines streams with rate of 40 PPS. - *Output*:: -The folowing figure presents the output. -image::images/stl_inter.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_interleaving_01.png"] +The folowing figure presents the output. +image::images/stl_interleaving_01.png[title="Interleaving of streams",align="left",width={p_width}, link="images/stl_interleaving_01.png"] *Discussion*:: * Stream #1 @@ -1402,8 +1402,10 @@ TRex>start -f stl/stl/burst_3pkt_60pkt.py --port 0 <2> Multi-burst of 5 bursts of 4 packets with an inter-burst gap of 1 second. 
-image::images/stl_tut_4.png[title="Example: Multiple Streams",align="left",width={p_width}, link="images/stl_multiple_streams_01.png"] -// OBSOLETE: image::images/stl_tut_4.png[title="Example: Multiple Streams",align="left",width={p_width}, link="images/stl_tut_4.png"] +The following illustration does not fully match the Python example cited above. It has been simplified, such as using a 0.5 second ISG, for illustration purposes. + +image::images/stl_multiple_streams_01.png[title="Example of multiple streams",align="left",width={p_width}, link="images/stl_multiple_streams_01.png"] + ==== Tutorial: Loops of streams @@ -1813,7 +1815,8 @@ For more information how to define headers see link:http://www.secdev.org/projec The following example generates traffic from many clients with different IP/MAC addresses to one server. -image::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_multiple_clients_01.png"] +image::images/stl_multiple_clients_01.png[title="Multiple clients to single server",align="left",width={p_width}, link="images/stl_multiple_clients_01.png"] + // OBSOLETEimage::images/stl_tut_12.png[title="client->server",align="left",width={p_width}, link="images/stl_tut_12.png"] 1. Send a gratuitous ARP from B->D with server IP/MAC (58.55.1.1). @@ -2214,7 +2217,7 @@ The following example creates a stream with no packets. The example uses the int This method can create loops like the following: -image::images/stl_null_stream.png[title="Null stream",align="left",width={p_width/2}, link="images/stl_null_stream_02.png"] +image::images/stl_null_stream_02.png[title="Null stream",align="left",width={p_width/2}, link="images/stl_null_stream_02.png"] 1. S1 - Sends a burst of packets, then proceed to stream NULL. 2. NULL - Waits the inter-stream gap (ISG) time, then proceed to S1. 
@@ -2229,13 +2232,9 @@ Null stream configuration: *(Future Feature - not yet implemented)* -image::images/stl_barrier.png[title="Stream Barrier",align="left",width={p_width}, link="images/stl_barrier_02.png"] - -image::images/stl_barrier.png[title="Stream Barrier",align="left",width={p_width}, link="images/stl_barrier_03.png"] - -In some cases there is a need to split the streams to thread in a way that specific stream will continue only after all the threads pass the same path. -In the above figure we would like to that stream S3 will start on all the thread after S2 was finished by all the threads +In some situations, it is necessary to split streams into threads in such a way that specific streams will continue only after all the threads have passed the same path. In the figure below, a barrier ensures that stream S3 starts only after all threads of S2 are complete. +image::images/stl_barrier_03.png[title="Stream Barrier",align="left",width={p_width}, link="images/stl_barrier_03.png"] ==== Tutorial: Pcap file to one stream -- cgit 1.2.3-korg From dfa86ce996cff9557d0aef78cea3d13827780fe0 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 9 May 2016 10:45:01 +0300 Subject: minor QSFP+ fix --- trex_book.asciidoc | 4 ++-- trex_toc.asciidoc | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index f13c63ae..6f619b16 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -149,8 +149,8 @@ WARNING: Intel X710 NIC for example FH X710DA4FHBLK will work *only* with Intel [options="header",cols="1,1",width="70%"] |================= | QSFP+ | Example -| QSFP+ SR4 optics | APPROVED OPTICS For Intel NICS, Cisco QSFP-40G-SR4-S does *not* work link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] -| QSFP+ LR-4 Optics | APPROVED OPTICS For Intel NICS , Cisco QSFP-40G-LR4-S does *not* work 
link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ SR4 optics | APPROVED OPTICS For Intel NICS, Cisco QSFP-40G-SR4-S will work *only* with XL710 NIC vendors with Open Optic like Silicom link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] +| QSFP+ LR-4 Optics | APPROVED OPTICS For Intel NICS , Cisco QSFP-40G-LR4-S will work *only* with XL710 NIC vendors with Open Optic like Silicom link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] | QSFP Active Optical Cables (AoC) | QSFP-H40G-AOC link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-660083.html[here] | QSFP+ Intel Ethernet Modular Optics | | QSFP+ DA twin-ax cables | diff --git a/trex_toc.asciidoc b/trex_toc.asciidoc index 46c51c2c..ad3b7161 100644 --- a/trex_toc.asciidoc +++ b/trex_toc.asciidoc @@ -172,6 +172,7 @@ ifdef::backend-xhtml11[] + + + + + + + + + + + + + +""" + +def do_replace (input_file,contents,look,str_replaced): + if contents.count(look)!=1 : + raise Exception('Cannot find {0} in file {1} '.format(look,input_file)) + + return contents.replace(look, str_replaced) + + def toc_fixup_file (input_file, out_file, @@ -265,9 +635,13 @@ def toc_fixup_file (input_file, file = open(input_file) contents = file.read() - replaced_contents = contents.replace('input_replace_me.json', json_file_name) + + contents = do_replace(input_file,contents,'', TOC_HEAD); + contents = do_replace(input_file,contents,'', TOC_END) + contents = do_replace(input_file,contents,'input_replace_me.json', json_file_name) + file = open(out_file,'w') - file.write(replaced_contents) + file.write(contents) file.close(); @@ -280,7 +654,7 @@ def convert_to_html_toc_book(task): tmp = os.path.splitext(task.outputs[0].abspath())[0]+'.tmp' json_out_file_short = 
os.path.splitext(task.outputs[0].name)[0]+'.json' - cmd='{0} -a stylesheet={1} -a icons=true -a docinfo -d book -a max-width=55em -o {2} {3}'.format( + cmd='{0} -a stylesheet={1} -a icons=true -a docinfo -d book -o {2} {3}'.format( task.env['ASCIIDOC'], task.inputs[1].abspath(), tmp, @@ -601,5 +975,10 @@ def publish_both(bld): publish_ext(bld) - +def test(bld): + # copy all the files to our web server + toc_fixup_file ('build/trex_stateless.tmp', + 'build/trex_stateless.html', + 'trex_stateless.json') + -- cgit 1.2.3-korg From 12ec7d990f8b50341f4df24bac8f45c82bc9bdf3 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Tue, 17 May 2016 16:25:54 +0300 Subject: minor --- trex_book.asciidoc | 22 +++++++++++++++------- trex_rpc_server_spec.asciidoc | 4 ++-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 2bb46b14..679950bd 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -607,8 +607,9 @@ include::trex_book_basic.asciidoc[] == Advanced features +=== VLAN Trunk support -=== VLAN Trunk support anchor:trex_valn[] +anchor:trex_valn[] The VLAN Trunk TRex feature attempts to solve the router port bandwidth limitation when the traffic profile is asymmetric. Example: Asymmetric SFR profile. This feature converts asymmetric traffic to symmetric, from the port perspective, using router sub-interfaces. @@ -740,7 +741,7 @@ With this feature, TRex replaces the source MAC address with the client IP addre <1> In this case, the client side MAC address looks like this: SRC_MAC = IPV4(IP) + 00:00 -=== IPv6 support (`--ipv6`) +=== IPv6 support Support for IPv6 includes: @@ -839,7 +840,9 @@ $sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --ma . It is assumed that most clients have a MAC address. At least 90% of IPs should have MAC address mapping. 
-=== Destination MAC address spreading anchor:mac_spread[] +=== Destination MAC address spreading + +anchor:mac_spread[] Using this option, one can send traffic to few destination devices. In normal mode, all packets are sent to the port destination MAC address. To enable this option, add `--mac-spread` to the command line. @@ -973,7 +976,7 @@ access-list 8 permit 17.0.0.0 0.0.0.255 * If the router is configured without NAT, it will verify that the inside_ip==outside_ip and inside_port==outside_port. ===================================================================== -=== Flow order/latency verification ( `--rx-check` ) +=== Flow order/latency verification In normal mode (without this feature enabled), received traffic is not checked by software. Hardware (Intel NIC) testin for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. This is one reason that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). @@ -1139,7 +1142,9 @@ Cpu Utilization : 0.1 % -=== Configuration YAML anchor:trex_config[] +=== Configuration YAML + +anchor:trex_config[] The configuration file, in YAML format, configures TRex behavior, including: @@ -1320,7 +1325,9 @@ We added configuration to the /etc/trex_cfg.yaml: This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became *~21%*! (40% with c=4) -=== Command line options anchor:cml-line[] +=== Command line options + +anchor:cml-line[] *-f=TRAFIC_YAML_FILE*:: Traffic YAML configuration file. 
@@ -1489,7 +1496,8 @@ $./bp-sim-64-debug -f avl/sfr_delay_10_1g.yaml -v 1 <2> CSV for all the templates -=== firmware update to XL710/X710 anchor:xl710-firmware[] +=== firmware update to XL710/X710 +anchor:xl710-firmware[] To upgrade the firmware follow this diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index ebdd5f9a..d9b80efd 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -842,7 +842,7 @@ please also consider the following constraints: * *performance* - this will have performance impact as rx packets will be examined * *override* - up to 10 bytes at the end of the packet will be overidden by the meta data required -===== The bytes needed for activating 'rx_stats': +===== The bytes needed for activating rx_stats * *stream_id* consumes 2 bytes * *seq_enabled* consumes 4 bytes @@ -1323,7 +1323,7 @@ over the server and preparing to perform work * *get_stream_list* - for every port, get the list and sync the GUI * *get_stream* - for every stream in a port list, get the stream info and sync the GUI -=== Simple Traffic With Adding / Editing Streams +=== Simple Traffic With Adding/Editing Streams describes a simple scenario where a user wants to add or edit one or more streams to one or more ports -- cgit 1.2.3-korg From 72e0cfd8200aca0fb2ded31c620bde38191a0d02 Mon Sep 17 00:00:00 2001 From: DavidBlock Date: Wed, 18 May 2016 12:02:28 +0300 Subject: Edited stl_loop_count_01b.png and added visio_drawings/illustrations_stateless.vsd --- images/stl_loop_count_01b.png | Bin 5416 -> 5524 bytes visio_drawings/illustrations_stateless.vsd | Bin 0 -> 5082624 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100755 visio_drawings/illustrations_stateless.vsd diff --git a/images/stl_loop_count_01b.png b/images/stl_loop_count_01b.png index 3f5620cd..891f1a9b 100755 Binary files a/images/stl_loop_count_01b.png and b/images/stl_loop_count_01b.png differ diff --git 
a/visio_drawings/illustrations_stateless.vsd b/visio_drawings/illustrations_stateless.vsd new file mode 100755 index 00000000..aa827a21 Binary files /dev/null and b/visio_drawings/illustrations_stateless.vsd differ -- cgit 1.2.3-korg From c60af146dc5b615b9c24d7e0e69ed97768f91953 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 18 May 2016 20:42:45 +0300 Subject: another TOC fix that does not work --- ws_main.py | 49 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/ws_main.py b/ws_main.py index 2ac89800..7e676224 100755 --- a/ws_main.py +++ b/ws_main.py @@ -278,6 +278,51 @@ TOC_HEAD = """
+ + + +
""" @@ -410,7 +455,7 @@ TOC_END = """ .jstree-clicked{ - color: green !important; + color: white !important; } @@ -564,7 +609,7 @@ TOC_END = """ // Close TOC by default if it is mobile function checkMobile(){ if(isMobileDevice()){ - closTOC(); + isOpen=false; $(".ui-resizable-e").hide(); } } -- cgit 1.2.3-korg From 5419e375097c9d66eb7691866886b6580e471538 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Thu, 19 May 2016 12:49:03 +0300 Subject: per flow latency tutorial - not final version --- trex_stateless.asciidoc | 102 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 95 insertions(+), 7 deletions(-) diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 225f60e1..f5b02619 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -510,7 +510,7 @@ In this example all the packets will be routed to `TenGigabitEthernet0/1/0` port ==== Tutorial: Connect from a remote server -*Goal*:: Connect by console from remote machine to a TRex server +*Goal*:: Connect by console from remote machine to a TRex server *Check that TRex server is operational*:: @@ -2940,7 +2940,7 @@ trex> * Per stream statistics are implemented using hardware assist when possible (examples: Intel X710/XL710 NIC flow director rules). * With other NICs (examples: Intel I350, 82599), per stream statistics are implemented in software. * Implementation: -** User chooses 32-bit packet group ID (pg_id) for each stream that need statistic reporting. Same pg_id can be used for more than one stream. In this case, statistics for all streams with the same pg_id will be combined. +** User chooses 32-bit packet group ID (pg_id) for each stream that need statistic reporting. Same pg_id can be used for more than one stream. In this case, statistics for all streams with the same pg_id will be combined. ** The IPv4 identification field of the stream is changed to a value within a reserved range (0xff00 to 0xffff). 
Note that if a stream for which no statistics are needed has an IPv4 Identification in the reserved range, it is changed (the left bit becomes 0). ** Software implementation: Hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. ** Hardware implementation: Hardware rules are inserted to count packets from relevant streams. @@ -2954,7 +2954,6 @@ trex> * Maximum number of concurrent streams (with different pg_id) on which statistics may be collected: 127 Two examples follow, one using the console and the other using the Python API. -// immediately below is the console example; where's the Python API example? *Console*:: @@ -3097,13 +3096,102 @@ The following shows a flow_stats object for 3 PG IDs after a specific run: ---- -==== Tutorial: Per stream latency/jitter +==== Tutorial: Per stream latency/jitter/packet errors -// [TODO] +* Per stream latency/jitter is implemented by software. This is an extension of the per stream statistics. Meaning, whenever you choose to get latency info for a stream, the statistics described +in the "Per stream statistics" section is also available. +* Implementation: +** User chooses 32-bit packet group ID (pg_id) for each stream that need latency reporting. pg_id should be unique per stream. +** The IPv4 identification field of the stream is changed to some defined constant value (in the reserved range described in the "per stream statistics" section), in order to signal the hardware to pass the stream to software. +** Last 16 bytes of the packet payload is used to pass needed information. Information contains ID of the stream, packet sequence number (per stream), timestamp of packet transmission. -*(Future Feature - not yet implemented)* +* Gathered info (per stream) is sent using a link:http://zguide.zeromq.org/[ZMQ] async channel to clients. 
+ +*Limitations*:: + +* The feature supports 2 packet types: +** IPv4 over Ethernet +** IPv4 with one VLAN tag +* Packets must contain at least 16 bytes payload. +* Each stream must have unique pg_id number. This also means that a given "latency collecting" stream can't be transmitted from two interfaces in parallel (internally it means that there are two streams). +* Maximum number of concurrent streams (with different pg_id) on which latency info may be collected: 128 (This is in addition to the streams which collect per stream statistics). + +Two examples follow, one using the console and the other using the Python API. + +*Console*:: + +The following simple traffic profile defines 2 streams and configures them with 2 different PG IDs. + +*File*:: link:{github_stl_path}/flow_stats_latency.py[stl/flow_stats_latency.py] + +[source,python] +---- + +class STLS1(object): + + def get_streams (self, direction = 0): + return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), + mode = STLTXCont(pps = 1000), + flow_stats = STLFlowLatencyStats(pg_id = 7)), <1> + + STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_594B_no_crc.pcap"), + mode = STLTXCont(pps = 5000), + flow_stats = STLFlowLatencyStats(pg_id = 12)) <2> + ] + + +---- +<1> Assigned to PG ID 7 +<2> Assigned to PG ID 12 + +The following command injects this to the console and uses the textual user interface (TUI) to display the TRex activity: + +[source,bash] +---- +trex>start -f stl/flow_stats.py --port 0 + +Removing all streams from port(s) [0]: [SUCCESS] + + +Attaching 2 streams to port(s) [0]: [SUCCESS] + + +Starting traffic on port(s) [0]: [SUCCESS] + +155.81 [ms] + +trex>tui -// note TODO +Latency Statistics (usec) + + PG ID | 12 | 13 + ---------------------------------------------- + Max latency | 0 | 0 #<1> + Avg latency | 5 | 5 #<2> + -- Window -- | | + Last (max) | 3 | 4 #<3> + Last-1 | 3 | 3 + Last-2 | 4 | 4 + Last-3 | 4 | 3 + Last-4 | 4 | 4 + Last-5 | 3 | 4 + Last-6 | 4 | 3 + 
Last-7 | 4 | 3 + Last-8 | 4 | 4 + Last-9 | 4 | 3 + --- | | + Jitter | 0 | 0 #<4> + ---- | | + Errors | 0 | 0 #<5> + +---- +<1> Maximum latency measured over the stream lifetime (in usec). +<2> Average latency over the stream lifetime (usec). +<3> Maximum latency measured between last two data reads from server (We currently read every 0.5 second). + Numbers below are maximum latency for previous measuring periods, so we get latency history for last few seconds. +<4> Jitter of latency measurements. +<5> Indication of number of errors (packet lost/out of order/duplicates) that occured). In the future it will be possible to 'zoom in', to see specific counters. + For now, if you need to see specific counters, you can use the Python API. ==== Tutorial: HLT traffic profile -- cgit 1.2.3-korg From c641f7dd49645b0f0f8b7ca2047f09da3634bf7b Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 19 May 2016 12:57:20 +0300 Subject: add pointer to stateless preso --- trex_index.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/trex_index.asciidoc b/trex_index.asciidoc index 41464c9d..ca8a0da6 100644 --- a/trex_index.asciidoc +++ b/trex_index.asciidoc @@ -19,6 +19,8 @@ include::trex_ga.asciidoc[] link:trex_preso.html[Old Stateful] | Stateful DPDK summit | link:http://www.slideshare.net/harryvanhaaren/trex-traffig-gen-hanoch-haim[DPDK summit] +| New Stateless support | +http://www.slideshare.net/HanochHaim/trex-realistic-traffic-generator-stateless-support[Stateless support] |================= -- cgit 1.2.3-korg From c53d6092de19cc9a21009bd20fbc8731bbf11e3a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 19 May 2016 13:01:37 +0300 Subject: fix TOC by Bilal --- ws_main.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/ws_main.py b/ws_main.py index 7e676224..c9094ccf 100755 --- a/ws_main.py +++ b/ws_main.py @@ -279,6 +279,23 @@ TOC_HEAD = """
+ + + + + + + + + + + + + + - - - - - - - + + + + + diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc new file mode 100644 index 00000000..8927750e --- /dev/null +++ b/trex_faq.asciidoc @@ -0,0 +1,323 @@ +TRex Stateless support +====================== +:author: TRex team +:email: trex.tgen@gmail.com +:revnumber: 0.1 +:quotes.++: +:numbered: +:web_server_url: http://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex +:github_stl_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl +:github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples +:toclevels: 6 + +include::trex_ga.asciidoc[] + +// PDF version - image width variable +ifdef::backend-docbook[] +:p_width: 450 +:p_width_1: 200 +:p_width_1a: 100 +:p_width_1c: 150 +:p_width_lge: 500 +endif::backend-docbook[] + +// HTML version - image width variable +ifdef::backend-xhtml11[] +:p_width: 800 +:p_width_1: 400 +:p_width_1a: 650 +:p_width_1a: 400 +:p_width_lge: 900 +endif::backend-xhtml11[] + + +== FAQ + +=== General + +==== What are the common use cases for TRex? +1. High scale benchmarks for Stateful features that inspect the traffic like Firewall/NAT/DPI +2. Higg scale DDOS Attacks see link:https://www.incapsula.com/blog/trex-traffic-generator-software.html[Why TRex is Our Choice of Traffic Generator Software] +3. High scale and flexible testing for Switch (e.g. RFC2544)- see link:https://wiki.fd.io/view/CSIT[fd.io] +4. Scale tests for a number of clients/servers/VLAN for controller base testing + +[NOTE] +===================================== +A feature that terminate TCP can't be tested yet +===================================== + +==== Who is using TRex? + +Cisco systems, Intel, Imperva, Vasona networks + +==== Can TRex run on the hypervisor with virtual NICS? 
+ +Yes, currently there is a need to have 2-3 cores and 4GB of memory (4GB can reduce significantly if required) + +The limitations: + +1. Each dual NICS (e.g. VMXNET3) you can have maximum one core +2. vSwitch can limit the maximum PPS to ~1MPPS +3. Latency results would not be accurate + +==== Why not all DPDK drivers are supported? +1. flow-director accelerator is used for each NIC type. +2. We have regression per each recommended NIC + +==== Is Cisco VIC supported? +No. The driver does not pass our regression + +==== Do you have 100Gb/s NIC QSFP+ support? +Not yet, working on FM10K and Mellanox Connectx5 + +==== Do you have GUI? +We are not developing it. Have a look here for one Stateless GUI from Exalt link:https://groups.google.com/forum/#!searchin/trex-tgn/sari%7Csort:relevance/trex-tgn/R92-N2Yjy2Q/DIUe06YCBgAJ[here] + + +==== What is the maximum number of ports per TRex application? +12 ports + +==== I can't see all 12 ports statistics on TRex server +Right, we present only the first four ports statistics (global statistics are still ok, like total Tx) because there is no console space. +Use the GUI or API to get per port statistics. + +==== Can I run multiple TRex servers on the same machine? +Yes. + +==== Can I use multiple type of ports with the same TRex server? +No. + +==== TRex on a VM with PCI Pass through or bare metal? +The bare metal will have a lower latency results + +==== I want to report an issue + +Two options: +1. Send email to trex.tgen@gmail.com +2. Open a defect here link:https://trex-tgn.cisco.com/youtrack[youtrack] + + +==== I want to contribute +You are welcome, just create a gitHub pool request + +=== Stateful + +==== TRex is connected to a Switch and we observe many drop packets at TRex startup +A Switch might be configured with spanning tree enable. TRex initialized the Interface port at startup and making the spanning tree drop the packets. +Disable spanning tree can help. 
On Cisco nexus it would be `switch(config)#spanning-tree port type edge` +This issue would be fixed when we consolidate Stateful and Stateless RPC +==== I can't see RX packets +TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC-ADDR. From Stateless mode, you can change the port mode to promiscuous + +==== Why the performance is low? + +TRex performance depends on many factors: + +1. make sure trex_cfg.yaml is optimal see "platform" section in manual +2. More concurrent flows will reduce the performance +3. Short flows with one/two packets (e.g. cap2/dns.yaml ) will give the worst performance + +==== Do you have plans to add TCP stack? + +Yes + + +=== Stateless + +==== Is pyATS supported as client framework + +Yes. both Python 3 and Python 2 + +==== Python API does not work on my Mac with ZMQ library issue + +We are using Python ZMQ wrapper. it needs to compiled per platform and we have a support for many platforms but not all of them +You will need to build ZMQ for your platform if it is not part of the package. + +[source,Python] +---- + from .trex_stl_client import STLClient, LoggerApi + File "../trex_stl_lib/trex_stl_client.py", line 7, in + from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage + File "../trex_stl_lib/trex_stl_jsonrpc_client.py", line 3, in + import zmq + File "/home/shilwu/trex_client/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/__init__.py", line 32, in + _libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL) + File "/usr/local/lib/python2.7/ctypes/__init__.py", line 365, in __init__ + self._handle = _dlopen(self._name, mode) +OSError: /lib64/libc.so.6: version `GLIBC_2.14' not found (required by /home/shilwu/trex_client/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/libzmq.so.3) + +---- + + +==== Is multi-user supported + +Yes. + +==== Can I create a corrupted packet? + +Yes. You can build any packet you like using Scapy. 
+However, there is no way to corrupt FCS yet. + +==== Why the performance is low? +What would reduce the performance: +1. More concurent streams +2. Complex field engine program + +Adding 'cache' directive can improve the performance see + +see link:trex_stateless.html#_tutorial_field_engine_significantly_improve_performance[here] + +and try this: + +[source,bash] +---- +$start -f stl/udp_1pkt_src_ip_split.py -m 100% +---- + +[source,python] +---- + + vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", + min_value="10.0.0.1", + max_value="10.0.0.255", + size=4, step=1,op="inc"), + + STLVmWrFlowVar (fv_name="ip_src", + pkt_offset= "IP.src" ), + + STLVmFixIpv4(offset = "IP") + ], + split_by_field = "ip_src", + cache_size =255 # the cache size <1> + ); +---- +<1> cache + + +==== I want to generate gratuitous ARP/NS IPv6 + +see example link:trex_stateless.html#_tutorial_field_engine_many_clients_with_arp[here] + + +==== What is the diffrance betwean Stateful and Stateles + +see link:trex_stateless.html#_stateful_vs_stateless[here] + +==== How do I create a deterministic random stream variable + +use `random_seed` per stream + +[source,python] +---- + return STLStream(packet = pkt, + random_seed = 0x1234, + mode = STLTXCont()) +---- + +==== Can I have a synconization betwean different stream variables + +No. each stream has it own, seperate field engine program + + +==== Do you have plan to have LUAJit as a field engine program + +It is a great idea to add it, we are looking for someone to contribute this support + + +==== Streams with latency enabled does not amplified by multiplier, why? + +The reason for the above (besides being a CPU constrained feature) is that most time the use case is that the latency is being used to validate that the traffic profile can stand under stress. +This way you can use the multiplier to amplify the main traffic, but not the 'testing probe'. 
+to mitigate this in the Console, you use 'tunables' for this +You can add in the Python profile a tunable that will specify the latency stream rate and you can provide it during the start in the console or in the API. + +tunables can be added through the console using 'start ... -t latency_rate=XXXXX' + +or using the Python API directly (for automation): +STLProfile.load_py(..., latency_rate = XXXXX) + +==== latency and statistic per stream is not supported for all type of packets + +Correct. Flow-director for counting/steering the packets. each NIC has its own support + +==== Java API instead of Python API + +Q:: I want to use the Python API via Java (with Jython), apparently, I cannot import Scapy modules with jython. +The way I see it I have two options: + +1. Creating python scripts and call them from java (with ProcessBuilder for example) +2. Call directly to the Trex server over RPC from Java + +However, option 2 seems like a re-writing the API for Java (which I am not going to do) +On the other hand, with option 1, once the script is done, the client object destroyed and I cannot use it anymore in my tests. + +Any ideas on what is the best way to use Trex within JAVA? + +A:: + +The power of our Python API is the scapy integration for simple building of the packets / fueled engine +There is a proxy over RPC that you can extend to your use cases. It has basic function like connect/start/stop/get_stats +You could use it to send some pcap file via ports, or so-called python profiles, which you can configure by passing different variables (so-called tunabels) via the RPC. 
+Take a look link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc] +You can even dump the profile as a string and move it to the proxy to run it (but it is a security hole as anything can be run on the TRex server) + +see link:https://github.com/zverevalexei/trex-http-proxy[here] for simple Web server proxy + +==== Where can I find a reference to RFC2544 using TRex + +link:https://gerrit.fd.io/r/gitweb?p=csit.git;a=tree;f=resources;hb=HEAD[here] + +=== Installation + +==== During OS installation, screen is skewed / error Out of range / resolution not supported etc + + * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode + * Ubuntu - try Ubuntu server, which has textual installation + +==== How to determine relation between TRex ports and Router ports + +Run the TRex with following command and check incoming packet on router interfaces: + +[source,bash] +---- + sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 +---- + +How to determine relation between Virtual OS ports and Hypervisor ports + +Compare the MACs address + name of interface, for example: + +[source,bash] +---- +* > ifconfig + +*eth0* Link encap:Ethernet *HWaddr 00:0c:29:2a:99:b2* + + ... + +* > sudo ./dpdk_setup_ports.py -s + +*03:00.0* 'VMXNET3 Ethernet Controller' *if=eth0* drv=vmxnet3 unused=igb_uio +---- + +[NOTE] +===================================== +If at TRex side the NICs are not visible to ifconfig, run: + +.... +sudo ./dpdk_nic_bind.py -b <1> <2> +.... + +<1> driver name - vmxnet3 for VMXNET3 and e1000 for E1000 +<2> 03:00.0 for example + +We are planning to add MACs to `./dpdk_setup_ports.py -s` +===================================== + +==== TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port + +TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. You can use switch with port mirroring to capture the traffic. 
+ +==== How can I map betwean TRex ports-id (e.g. port 0) to physical router interface + +Load TRex in a stateless mode and run traffic from each port + + + diff --git a/trex_index.asciidoc b/trex_index.asciidoc index 36e8a0a0..158119c5 100644 --- a/trex_index.asciidoc +++ b/trex_index.asciidoc @@ -27,7 +27,7 @@ http://www.slideshare.net/HanochHaim/trex-realistic-traffic-generator-stateless- |================= | Description | Name | FAQ | -link:trex_manual.html#_troubleshoot_common_problems_faq[FAQ] +link:trex_faq.html[FAQ] | Installation Guide | link:trex_manual.html#_download_and_installation[Installation] | Release Notes | diff --git a/ws_main.py b/ws_main.py index 7b32409b..930cbc34 100755 --- a/ws_main.py +++ b/ws_main.py @@ -890,7 +890,7 @@ def build(bld): bld(rule=convert_to_pdf_book,source='trex_book.asciidoc waf.css', target='trex_book.pdf', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book,source='trex_stateless.asciidoc waf.css', target='trex_stateless.pdf', scan=ascii_doc_scan) - + bld(rule=convert_to_pdf_book,source='draft_trex_stateless.asciidoc waf.css', target='draft_trex_stateless.pdf', scan=ascii_doc_scan) bld(rule=convert_to_pdf_book,source='trex_vm_manual.asciidoc waf.css', target='trex_vm_manual.pdf', scan=ascii_doc_scan) @@ -909,6 +909,9 @@ def build(bld): bld(rule=convert_to_html_toc_book, source='trex_book.asciidoc waf.css', target='trex_manual.html',scan=ascii_doc_scan); + bld(rule=convert_to_html_toc_book, + source='trex_faq.asciidoc waf.css', target='trex_faq.html',scan=ascii_doc_scan); + bld(rule=convert_to_html_toc_book, source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html',scan=ascii_doc_scan); -- cgit 1.2.3-korg From 6368dbb61a30fc83c00c0d6fa3edef0ef22374ac Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 4 Aug 2016 17:27:31 +0300 Subject: minor --- release_notes.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 
667019f2..18d20695 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -33,7 +33,7 @@ endif::backend-docbook[] * Support graceful shutdown command * Console - support L1 BPS using `-m 10bpsl1` see link:http://trex-tgn.cisco.com/youtrack/issue/trex-230[trex-230] * Improve TUI refresh time -* Support IPV6 latency streams (support is available for all interface types except 82599) see link:https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors +* Support IPV6 latency streams (support is available for all interface types except 82599) see link:trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors [IMPORTANT] ===================================== -- cgit 1.2.3-korg From b4704a0e19d2192165ae0d8fe1a8c87ca9638145 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Sun, 7 Aug 2016 13:48:48 +0300 Subject: minor FAQ changes --- release_notes.asciidoc | 2 +- trex_faq.asciidoc | 38 +++++++++++++++++++++++++++++--------- trex_stateless.asciidoc | 2 +- 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 18d20695..7ecce8ef 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -33,7 +33,7 @@ endif::backend-docbook[] * Support graceful shutdown command * Console - support L1 BPS using `-m 10bpsl1` see link:http://trex-tgn.cisco.com/youtrack/issue/trex-230[trex-230] * Improve TUI refresh time -* Support IPV6 latency streams (support is available for all interface types except 82599) see link:trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors +* Support IPV6 latency streams (support is available for all interface types except 82599) see link:trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors[IPV6 latency] [IMPORTANT] ===================================== diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 8927750e..1e2338a1 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -49,7 
+49,7 @@ A feature that terminate TCP can't be tested yet ==== Who is using TRex? -Cisco systems, Intel, Imperva, Vasona networks +Cisco systems, Intel, Imperva, Vasona networks and probably more ==== Can TRex run on the hypervisor with virtual NICS? @@ -68,11 +68,11 @@ The limitations: ==== Is Cisco VIC supported? No. The driver does not pass our regression -==== Do you have 100Gb/s NIC QSFP+ support? +==== Is 100Gb/s NIC QSFP+ supported? Not yet, working on FM10K and Mellanox Connectx5 -==== Do you have GUI? -We are not developing it. Have a look here for one Stateless GUI from Exalt link:https://groups.google.com/forum/#!searchin/trex-tgn/sari%7Csort:relevance/trex-tgn/R92-N2Yjy2Q/DIUe06YCBgAJ[here] +==== Is there a GUI? +The core team is not developing it. Have a look link:https://groups.google.com/forum/#!searchin/trex-tgn/sari%7Csort:relevance/trex-tgn/R92-N2Yjy2Q/DIUe06YCBgAJ[here] for one Stateless GUI from Exalt ==== What is the maximum number of ports per TRex application? @@ -98,9 +98,22 @@ Two options: 2. Open a defect here link:https://trex-tgn.cisco.com/youtrack[youtrack] +==== I have X710 NIC with 4x10Gb/sec ports and I can't get line rate +Correct. x710da4fh with 4 ports of 10gb can reached maximum of 40MPPS (total for all the ports) and not 60MPPS with small packets (64B) +still it is better than x520 (559 based) than can give ~30MPPS for two ports with one NIC + +==== I have XL710 NIC with 2x40Gb/sec ports and I can't get line rate +Correct. XL710-da2 with 2 ports of 40gb can reached maximum of 40MPPS/50Gb (total for all the ports) and not 60MPPS with small packets (64B) +The two ports are for redundancy and can't reach 80Gb/sec line rate + ==== I want to contribute You are welcome, just create a gitHub pool request +==== What is the release process? +It is a continuous integration - the latest version is under 24/7 regression on a few setups. Once we have enough content we release it to GitHub with a new tag. 
+We don't send an email for every new release as it could be too frequent for some. + + === Stateful ==== TRex is connected to a Switch and we observe many drop packets at TRex startup @@ -118,10 +131,18 @@ TRex performance depends on many factors: 2. More concurrent flows will reduce the performance 3. Short flows with one/two packets (e.g. cap2/dns.yaml ) will give the worst performance -==== Do you have plans to add TCP stack? - +==== Is there a plan to add TCP stack? Yes +==== How can I run the YAML profile and capture the results to a pcap file? +you can use the simulator. see link:trex_manual.html#_simulator[simulator] +The output of the simulator can be loaded to Excel. The CPS can be tuned + +==== I want to have more active flows, how can I do it +Each profile will have the same active flows/Gb in TRex. DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows) +to artificialy increse the active flows - you can incress the IPG in the YAML file. or change the pcap file and + + === Stateless @@ -220,7 +241,7 @@ use `random_seed` per stream No. each stream has it own, seperate field engine program -==== Do you have plan to have LUAJit as a field engine program +==== Is there a plan to have LUAJit as a field engine program? It is a great idea to add it, we are looking for someone to contribute this support @@ -261,6 +282,7 @@ There is a proxy over RPC that you can extend to your use cases. It has basic fu You could use it to send some pcap file via ports, or so-called python profiles, which you can configure by passing different variables (so-called tunabels) via the RPC. 
Take a look link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc] You can even dump the profile as a string and move it to the proxy to run it (but it is a security hole as anything can be run on the TRex server) +for more info see link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc] see link:https://github.com/zverevalexei/trex-http-proxy[here] for simple Web server proxy @@ -312,11 +334,9 @@ We are planning to add MACs to `./dpdk_setup_ports.py -s` ===================================== ==== TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port - TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. You can use switch with port mirroring to capture the traffic. ==== How can I map betwean TRex ports-id (e.g. port 0) to physical router interface - Load TRex in a stateless mode and run traffic from each port diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index a25292c2..4a386a9b 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -38,7 +38,7 @@ endif::backend-xhtml11[] This document assumes basic knowledge of TRex, and assumes that TRex is installed and configured. For information, see the link:trex_manual.html[manual], especially the material up to the link:trex_manual.html#_basic_usage[Basic Usage] section. 
-== Stateless support (Alpha stage) +== Stateless support (Beta stage) === High level functionality // maybe Feature overview -- cgit 1.2.3-korg From add0940affc15fae6a3f521aa61d39fd9e21e183 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Thu, 11 Aug 2016 13:55:09 +0300 Subject: Removing not about X710 IPv6 flow statistics not supported --- trex_stateless.asciidoc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 4a386a9b..88eed076 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -2718,7 +2718,7 @@ trex> ** The IPv4 identification (or IPv6 flow label in case of IPv6 packet) field of the stream is changed to a value within the reserved range 0xff00 to 0xffff (0xff00 to 0xfffff in case of IPv6). Note that if a stream for which no statistics are needed has an IPv4 Id (or IPv6 flow label) in the reserved range, it is changed (the left bit becomes 0). ** Software implementation: Hardware rules are used to direct packets from relevant streams to rx thread, where they are counted. ** Hardware implementation: Hardware rules are inserted to count packets from relevant streams. -* Summed up statistics (per stream, per port) are sent using a link:http://zguide.zeromq.org/[ZMQ] async channel to clients. +* Summed up statistics (per stream, per port) is sent using a link:http://zguide.zeromq.org/[ZMQ] async channel to clients. *Limitations*:: @@ -2727,10 +2727,6 @@ trex> ** IPv4 with one VLAN tag (except 82599 which does not support this type of packet) ** IPv6 over Ethernet (except 82599 which does not support this type of packet) ** IPv6 with one VLAN tag (except 82599 which does not support this type of packet) -[NOTE] -===================================== -X710 support for IPv6 for this feature is not available yet. Will be added soon. Latency for X710 (next section) is supported. 
-===================================== * Maximum number of concurrent streams (with different pg_id) on which statistics may be collected: 127 @@ -2903,7 +2899,7 @@ in the "Per stream statistics" section is also available. the rate of a latency stream, you need to manually edit your profile file. Usually this is not necessary, since normally you stress the system using non latency stream, and (in parallel) measure latency using constant rate latency stream. -Two examples follow, one using the console and the other using the Python API. +Two examples follow. One using the console and the other using the Python API. *Console*:: -- cgit 1.2.3-korg From 8c57c276be77aca4a68a46b1af711c1bf4a2b93b Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Thu, 11 Aug 2016 16:50:24 +0300 Subject: FAQ corrections --- trex_faq.asciidoc | 325 +++++++++++++++++++++++++++++------------------------- 1 file changed, 176 insertions(+), 149 deletions(-) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 1e2338a1..da20711e 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -1,8 +1,8 @@ -TRex Stateless support -====================== +TRex Frequently Asked Questions +================================ :author: TRex team :email: trex.tgen@gmail.com -:revnumber: 0.1 +:revnumber: 0.2 :quotes.++: :numbered: :web_server_url: http://trex-tgn.cisco.com/trex @@ -36,123 +36,206 @@ endif::backend-xhtml11[] === General +==== What is TRex? +TRex is fast realistic open source traffic generation tool, running on standard Intel processors, based on DPDK. It supports both stateful and stateless traffic generation modes. + ==== What are the common use cases for TRex? -1. High scale benchmarks for Stateful features that inspect the traffic like Firewall/NAT/DPI -2. Higg scale DDOS Attacks see link:https://www.incapsula.com/blog/trex-traffic-generator-software.html[Why TRex is Our Choice of Traffic Generator Software] -3. High scale and flexible testing for Switch (e.g. 
RFC2544)- see link:https://wiki.fd.io/view/CSIT[fd.io] -4. Scale tests for a number of clients/servers/VLAN for controller base testing +1. High scale benchmarks for stateful networking gear. For example: Firewall/NAT/DPI. +2. Generating high scale DDOS attacks. See link:https://www.incapsula.com/blog/trex-traffic-generator-software.html[Why TRex is Our Choice of Traffic Generator Software] +3. High scale, flexible testing for switchs (e.g. RFC2544)- see link:https://wiki.fd.io/view/CSIT[fd.io] +4. Scale tests for huge numbers of clients/servers for controller based testing. +5. EDVT and production tests. [NOTE] ===================================== -A feature that terminate TCP can't be tested yet +Features terminating TCP can't be tested yet. ===================================== -==== Who is using TRex? +==== Who uses TRex? + +Cisco systems, Intel, Imperva, Melanox, Vasona networks and much more. -Cisco systems, Intel, Imperva, Vasona networks and probably more +==== What are the Stateful and Stateless modes of operation? -==== Can TRex run on the hypervisor with virtual NICS? +'Stateful' mode is meant for testing networking gear which save state per flow (5 tuple). Usually, this is done by injecting pre recorded cap files on pairs of interfaces of the device under test, changing src/dst IP/port. +'Stateless' mode is meant to test networking gear, not saving state per flow (doing the decision on per packet bases). This is usually done by injecting customed packet streams to the device under test. +See link:trex_stateless.html#_stateful_vs_stateless[here] for more details. -Yes, currently there is a need to have 2-3 cores and 4GB of memory (4GB can reduce significantly if required) +==== Can TRex run on an hypervisor with virtual NICS? -The limitations: +Yes. Currently there is a need for 2-3 cores and 4GB memory. 
For VM use case, memory requirement can be significantly reduced if needed (at the cost of supporting less concurrent flows) +by using the following link:trex_manual.html#_memory_section_configuration[configuration] -1. Each dual NICS (e.g. VMXNET3) you can have maximum one core -2. vSwitch can limit the maximum PPS to ~1MPPS -3. Latency results would not be accurate +Limitations: + +1. Performance is limited. For each NIC port pair, you can utilize only one CPU core. +2. Using vSwitch will limit the maximum PPS to around 1MPPS. +3. Latency results will not be accurate. -==== Why not all DPDK drivers are supported? -1. flow-director accelerator is used for each NIC type. -2. We have regression per each recommended NIC +==== Why not all DPDK supported NICs supported by TRex? +1. We are using specific NIC features. Not all the NICs have the capabilities we need. +2. We have regression tests in our lab for each recommended NIC. We don't claim to support NICs we don't have in our lab. ==== Is Cisco VIC supported? -No. The driver does not pass our regression +No. Currently its DPDK driver does not support the capabilities needed to run TRex. -==== Is 100Gb/s NIC QSFP+ supported? -Not yet, working on FM10K and Mellanox Connectx5 +==== Is 100Gbs NIC QSFP+ supported? +Not yet. Support for FM10K and Mellanox Connectx5 is under development. ==== Is there a GUI? -The core team is not developing it. Have a look link:https://groups.google.com/forum/#!searchin/trex-tgn/sari%7Csort:relevance/trex-tgn/R92-N2Yjy2Q/DIUe06YCBgAJ[here] for one Stateless GUI from Exalt - +TRex team is not developing it. Have a look link:https://groups.google.com/forum/#!searchin/trex-tgn/sari%7Csort:relevance/trex-tgn/R92-N2Yjy2Q/DIUe06YCBgAJ[here] for TRex Stateless mode GUI from Exalt company. ==== What is the maximum number of ports per TRex application? 
12 ports -==== I can't see all 12 ports statistics on TRex server -Right, we present only the first four ports statistics (global statistics are still ok, like total Tx) because there is no console space. -Use the GUI or API to get per port statistics. +==== I can not see all 12 ports statistics on TRex server. +We present statistics only for the first four ports because there is no console space. Global statistics (like total TX) are correct, taking into account all ports. +You can use the GUI/console or Python API, to see statistics for all ports. ==== Can I run multiple TRex servers on the same machine? -Yes. +One option for running a few instances on the same physical machine is to install a few VMs. +Currently, it is complicated to do without using VMs (but possible with some advanced config file options). We are working on +a solution to make this easier. + +==== Can I use multiple types of ports with the same TRex server instance? +No. All ports in the configuration file should be of the same NIC type. + +==== What is better, running TRex on VM with PCI pass through or TRex on bare metal? +The answer depends on your budget and needs. Bare metal will have lower latency and better performance. VM has the advantages you normally get when using VMs. + +==== I want to report an issue. + +You have two options: + +1. Send email to our support group: trex.tgen@gmail.com + +2. Open a defect at our link:https://trex-tgn.cisco.com/youtrack[youtrack]. You can also influence by voting in youtrack for an +existing issue. Issues with lots of voters will probably be fixed sooner. + + +==== I have Intel X710 NIC with 4x10Gb/sec ports and I can not get line rate. +x710da4fh with 4 10G ports can reach a maximum of 40MPPS (total for all ports) with 64 bytes packets. (can not reach the theoretical 60MPPS limit). +This is still better than the Intel x520 (82559 based) which can reach ~30MPPS for two ports with one NIC. 
+ +==== I have XL710 NIC with 2x40Gb/sec ports and I can not get line rate +XL710-da2 with 2 40G ports can reach maximum of 40MPPS/50Gb (total for all ports) and not 60MPPS with small packets (64B) +Intel had in mind redundancy use case when they produced a two port NIC. Card was not intended to reach 80G line rate. + +==== I want to contribute to the project +You have several ways you can help: + +1. Download the product, use it, and report issues (If no issues, we will be very happy to also hear success stories). + +2. If you use the product and have improvment suggestions (for the product or documentation) we will be happy to hear. + +3. If you fix a bug, or develop new feature, you are more than welcome to create pool request in GitHub. + +==== What is the release process? How do I know when a new release is available? +It is a continuous integration. The latest internal version is under 24/7 regression on few setups in our lab. Once we have enough content we release it to GitHub (Usually every few weeks). +We don't send an email for every new release, as it could be too frequent for some people. We announce big feature releases on the mailing list. You can always check the GitHub of course. + +=== Startup and Installation + +==== Can I experiment with TRex without installing? +You can. Check the TRex sandbox at Cisco devnet in the following link:https://devnetsandbox.cisco.com/RM/Diagram/Index/2ec5952d-8bc5-4096-b327-c294acd9512d?diagramType=Topology[link]. + +==== How do I obtain TRex, and what kind of hardware do I need? +You have several options. + +1. For playing around and experimenting, you can install TRex on VirtualBox by following this link:trex_vm_manual.html[link]. + +2. To run the real product, check link:trex_manual.html#_download_and_installation[here] for hardware recommendation and +installation instructions. 
+ +==== During OS installation, screen is skewed / error "out of range" / resolution not supported etc + + * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode + * Ubuntu - try Ubuntu server, which has textual installation + +==== How to determine relation between TRex ports and device under test ports + +Run the TRex with following command and check incoming packet on router interfaces: + +[source,bash] +---- + sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 +---- -==== Can I use multiple type of ports with the same TRex server? -No. +==== How to determine relation between Virtual OS ports and Hypervisor ports -==== TRex on a VM with PCI Pass through or bare metal? -The bare metal will have a lower latency results +Compare the MACs address + name of interface, for example: -==== I want to report an issue +[source,bash] +---- +* > ifconfig + +*eth0* Link encap:Ethernet *HWaddr 00:0c:29:2a:99:b2* + + ... -Two options: -1. Send email to trex.tgen@gmail.com -2. Open a defect here link:https://trex-tgn.cisco.com/youtrack[youtrack] +* > sudo ./dpdk_setup_ports.py -s + +*03:00.0* 'VMXNET3 Ethernet Controller' *if=eth0* drv=vmxnet3 unused=igb_uio +---- +[NOTE] +===================================== +If at TRex side the NICs are not visible to ifconfig, run: + +.... +sudo ./dpdk_nic_bind.py -b <1> <2> +.... -==== I have X710 NIC with 4x10Gb/sec ports and I can't get line rate -Correct. x710da4fh with 4 ports of 10gb can reached maximum of 40MPPS (total for all the ports) and not 60MPPS with small packets (64B) -still it is better than x520 (559 based) than can give ~30MPPS for two ports with one NIC +<1> driver name - vmxnet3 for VMXNET3 and e1000 for E1000 +<2> 03:00.0 for example -==== I have XL710 NIC with 2x40Gb/sec ports and I can't get line rate -Correct. 
XL710-da2 with 2 ports of 40gb can reached maximum of 40MPPS/50Gb (total for all the ports) and not 60MPPS with small packets (64B) -The two ports are for redundancy and can't reach 80Gb/sec line rate +We are planning to add MACs to `./dpdk_setup_ports.py -s` +===================================== -==== I want to contribute -You are welcome, just create a gitHub pool request +==== TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port +TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. You can use a switch with port mirroring to capture the traffic. -==== What is the release process? -It is a continuous integration - the latest version is under 24/7 regression on a few setups. Once we have enough content we release it to GitHub with a new tag. -We don't send an email for every new release as it could be too frequent for some. +==== How can I map between TRex port-id (e.g. port 0) and physical router interface? +Load TRex in stateless mode, run traffic from each port, and look at the counters on the router interfaces. === Stateful -==== TRex is connected to a Switch and we observe many drop packets at TRex startup -A Switch might be configured with spanning tree enable. TRex initialized the Interface port at startup and making the spanning tree drop the packets. -Disable spanning tree can help. On Cisco nexus it would be `switch(config)#spanning-tree port type edge` -This issue would be fixed when we consolidate Stateful and Stateless RPC -==== I can't see RX packets -TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC-ADDR. From Stateless mode, you can change the port mode to promiscuous - -==== Why the performance is low? +==== How do I start using the stateful mode? +You should first have a YAML configuration file. See link:trex_manual.html#_traffic_yaml_parameter_of_f_option[here]. 
+Then, you can find some basic examples link:trex_manual.html#_trex_command_line[here]. + +==== TRex is connected to a switch and we observe many dropped packets at TRex startup. +A switch might be configured with spanning tree enabled. TRex initializes the port at startup, making the spanning tree drop the packets. +Disabling spanning tree can help. On Cisco nexus, you can do that using `spanning-tree port type edge` +This issue would be fixed when we consolidate 'Stateful' and 'Stateless' RPC. + +==== I can not see RX packets +TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC address. From Stateless mode, you can change the port mode to promiscuous. + +Also, revisit your MAC address configuration in the TRex config file. Wrong MAC address configuration will cause all packets to be dropped. + +==== Why is the performance low? TRex performance depends on many factors: -1. make sure trex_cfg.yaml is optimal see "platform" section in manual +1. Make sure trex_cfg.yaml is optimal see "platform" section in manual 2. More concurrent flows will reduce the performance 3. Short flows with one/two packets (e.g. cap2/dns.yaml ) will give the worst performance ==== Is there a plan to add TCP stack? -Yes +Yes. We know this is something many people would like, and are working on this. No ETA yet. Once a progress is made, we will announce it on the TRex site and mailing list. ==== How can I run the YAML profile and capture the results to a pcap file? -you can use the simulator. see link:trex_manual.html#_simulator[simulator] -The output of the simulator can be loaded to Excel. The CPS can be tuned +You can use the simulator. see link:trex_manual.html#_simulator[simulator] +The output of the simulator can be loaded to Excel. The CPS can be tuned. -==== I want to have more active flows, how can I do it -Each profile will have the same active flows/Gb in TRex. 
DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows) -to artificialy increse the active flows - you can incress the IPG in the YAML file. or change the pcap file and - +==== I want to have more active flows on the DUT, how can I do it? +After stretching TRex to its maximum CPS capacity, consider the following: DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows). +In order to artificialy increse the length of the active flows in TRex, you can config larger IPG in the YAML file. This will cause each flow to last longer. Alternatively, you can increase IPG in your PCAP file as well. === Stateless +==== How do I get started with stateless mode? +You should first have a YAML configuration file. See link:trex_manual.html#_traffic_yaml_parameter_of_f_option[here]. +Then, you can have a look at the stateless manual link:trex_stateless.html[here]. You can jump right into the link:trex_stateless.html#_tutorials[tutorials section]. + ==== Is pyATS supported as client framework -Yes. both Python 3 and Python 2 +Yes. Both Python 3 and Python 2 -==== Python API does not work on my Mac with ZMQ library issue +==== Python API does not work on my Mac with the below ZMQ library issue -We are using Python ZMQ wrapper. it needs to compiled per platform and we have a support for many platforms but not all of them +We are using Python ZMQ wrapper. It needs to be compiled per platform and we have a support for many platforms but not all of them. You will need to build ZMQ for your platform if it is not part of the package. [source,Python] @@ -173,21 +256,20 @@ OSError: /lib64/libc.so.6: version `GLIBC_2.14' not found (required by /home/shi ==== Is multi-user supported -Yes. +Yes. Multiple TRex clients can connect to the same TRex server. -==== Can I create a corrupted packet? 
+==== Can I create corrupted packets? Yes. You can build any packet you like using Scapy. -However, there is no way to corrupt FCS yet. +However, there is no way to create corrupted L1 fields (Like Ethernet FCS), since these are usually handled by the NIC hardware. ==== Why the performance is low? -What would reduce the performance: -1. More concurent streams -2. Complex field engine program +What would reduce the performance: -Adding 'cache' directive can improve the performance see +1. Many concurrent streams. +2. Complex field engine program. -see link:trex_stateless.html#_tutorial_field_engine_significantly_improve_performance[here] +Adding 'cache' directive can improve the performance. See link:trex_stateless.html#_tutorial_field_engine_significantly_improve_performance[here] and try this: @@ -216,14 +298,9 @@ $start -f stl/udp_1pkt_src_ip_split.py -m 100% <1> cache -==== I want to generate gratuitous ARP/NS IPv6 - -see example link:trex_stateless.html#_tutorial_field_engine_many_clients_with_arp[here] +==== I want to generate gratuitous ARP/NS IPv6. - -==== What is the diffrance betwean Stateful and Stateles - -see link:trex_stateless.html#_stateful_vs_stateless[here] +See example link:trex_stateless.html#_tutorial_field_engine_many_clients_with_arp[here] ==== How do I create a deterministic random stream variable @@ -236,35 +313,35 @@ use `random_seed` per stream mode = STLTXCont()) ---- -==== Can I have a synconization betwean different stream variables +==== Can I have a synchronization between different stream variables? No. each stream has it own, seperate field engine program ==== Is there a plan to have LUAJit as a field engine program? -It is a great idea to add it, we are looking for someone to contribute this support - +It is a great idea to add it, we are looking for someone to contribute this support. -==== Streams with latency enabled does not amplified by multiplier, why? 
-The reason for the above (besides being a CPU constrained feature) is that most time the use case is that the latency is being used to validate that the traffic profile can stand under stress. -This way you can use the multiplier to amplify the main traffic, but not the 'testing probe'. -to mitigate this in the Console, you use 'tunables' for this -You can add in the Python profile a tunable that will specify the latency stream rate and you can provide it during the start in the console or in the API. - -tunables can be added through the console using 'start ... -t latency_rate=XXXXX' +==== Streams with latency enabled do not get amplified by multiplier, why? +Reason for this (besides being a CPU constrained feature) is that most of the time, the use case is that you load the DUT using some traffic streams, and check latency +using different streams. The latency stream is kind of 'testing probe' which you want to keep at constant rate, while playing with the rate of your other (loading) streams. +So, you can use the multiplier to amplify your main traffic, without changing your 'testing probe'. +If you do want to amplify latency streams, you can do this using 'tunables'. +You can add in the Python profile a 'tunable' which will specify the latency stream rate and you can provide it to the 'start' command in the console or in the API. +Tunables can be added through the console using 'start ... -t latency_rate=XXXXX' or using the Python API directly (for automation): STLProfile.load_py(..., latency_rate = XXXXX) +You can see example for defining and using tunables link:trex_stateless.html#_tutorial_advanced_traffic_profile[here]. -==== latency and statistic per stream is not supported for all type of packets +==== Latency and statistic per stream is not supported for all types of packets. -Correct. Flow-director for counting/steering the packets. each NIC has its own support +Correct. 
We use NIC capabilities for counting the packets or directing them to be handled by software. Each NIC has its own capabilities. Look link:trex_stateless.html#_tutorial_per_stream_statistics[here] and link:/trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors[here] for details. ==== Java API instead of Python API -Q:: I want to use the Python API via Java (with Jython), apparently, I cannot import Scapy modules with jython. +Q:: I want to use the Python API via Java (with Jython), apparently, I can not import Scapy modules with Jython. The way I see it I have two options: 1. Creating python scripts and call them from java (with ProcessBuilder for example) @@ -277,67 +354,17 @@ Any ideas on what is the best way to use Trex within JAVA? A:: -The power of our Python API is the scapy integration for simple building of the packets / fueled engine -There is a proxy over RPC that you can extend to your use cases. It has basic function like connect/start/stop/get_stats +The power of our Python API is the scapy integration for simple building of the packets and the field engine. +There is a proxy over RPC that you can extend to your use cases. It has basic functionality, like connect/start/stop/get_stats. You could use it to send some pcap file via ports, or so-called python profiles, which you can configure by passing different variables (so-called tunabels) via the RPC. -Take a look link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc] -You can even dump the profile as a string and move it to the proxy to run it (but it is a security hole as anything can be run on the TRex server) -for more info see link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc] +Take a look at link:trex_stateless.html#_using_stateless_client_via_json_rpc[using_stateless_client_via_json_rpc]. 
+You can even dump the profile as a string and move it to the proxy to run it (Notice that it is a potential security hole, as you allow outside content to run as root on the TRex server). -see link:https://github.com/zverevalexei/trex-http-proxy[here] for simple Web server proxy +See link:https://github.com/zverevalexei/trex-http-proxy[here] an example for simple Web server proxy for interacting with TRex. ==== Where can I find a reference to RFC2544 using TRex link:https://gerrit.fd.io/r/gitweb?p=csit.git;a=tree;f=resources;hb=HEAD[here] -=== Installation - -==== During OS installation, screen is skewed / error Out of range / resolution not supported etc - - * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode - * Ubuntu - try Ubuntu server, which has textual installation - -==== How to determine relation between TRex ports and Router ports - -Run the TRex with following command and check incoming packet on router interfaces: - -[source,bash] ----- - sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 ----- - -How to determine relation between Virtual OS ports and Hypervisor ports - -Compare the MACs address + name of interface, for example: - -[source,bash] ----- -* > ifconfig + -*eth0* Link encap:Ethernet *HWaddr 00:0c:29:2a:99:b2* + - ... - -* > sudo ./dpdk_setup_ports.py -s + -*03:00.0* 'VMXNET3 Ethernet Controller' *if=eth0* drv=vmxnet3 unused=igb_uio ----- - -[NOTE] -===================================== -If at TRex side the NICs are not visible to ifconfig, run: + -.... -sudo ./dpdk_nic_bind.py -b <1> <2> -.... - -<1> driver name - vmxnet3 for VMXNET3 and e1000 for E1000 -<2> 03:00.0 for example - -We are planning to add MACs to `./dpdk_setup_ports.py -s` -===================================== - -==== TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port -TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. 
You can use switch with port mirroring to capture the traffic. - -==== How can I map betwean TRex ports-id (e.g. port 0) to physical router interface -Load TRex in a stateless mode and run traffic from each port - -- cgit 1.2.3-korg From c7cb056f103df37a35174d5e0652f4bde1b5c456 Mon Sep 17 00:00:00 2001 From: itraviv Date: Wed, 17 Aug 2016 14:37:59 +0300 Subject: added Image for the trex_scapy_rpc_server.asciidoc --- images/Scapy_JSON_rpc_server.png | Bin 0 -> 107058 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100755 images/Scapy_JSON_rpc_server.png diff --git a/images/Scapy_JSON_rpc_server.png b/images/Scapy_JSON_rpc_server.png new file mode 100755 index 00000000..22972d26 Binary files /dev/null and b/images/Scapy_JSON_rpc_server.png differ -- cgit 1.2.3-korg From 1af4a0b73edeeef3d6b5a1db4fc2993cdd110beb Mon Sep 17 00:00:00 2001 From: itraviv Date: Wed, 17 Aug 2016 14:50:19 +0300 Subject: added description for supported methods, added error description table added supported methods to RPC supported commands image: removed the outline titled 'TRex control plane' --- images/Scapy_JSON_rpc_server.png | Bin 107058 -> 98993 bytes trex_scapy_rpc_server.asciidoc | 587 +++++++++++++++++++++------------------ 2 files changed, 313 insertions(+), 274 deletions(-) diff --git a/images/Scapy_JSON_rpc_server.png b/images/Scapy_JSON_rpc_server.png index 22972d26..6e62e792 100755 Binary files a/images/Scapy_JSON_rpc_server.png and b/images/Scapy_JSON_rpc_server.png differ diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index 7578343f..bf771d4c 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -37,128 +37,115 @@ The TRex Scapy RPC Server Anyone who wants to create,edit and assemble packets for TRex -== RPC Support On TRex -TRex implements a RPC protocol in order to config, view and -in general execute remote calls on TRex -In this document we will provide information on -how a client can implement 
the protocol used to communicate with TRex +== Scapy RPC Server - Overview +Scapy Server is implemented following the link:http://www.jsonrpc.org/specification[JSON-RPC 2.0 specification], + +Therefore, requests and replies follow the JSON-RPC 2.0 spec. + +The server operates on a Request-Response basis *over ZMQ*, and does not support batched commands handling. + -In general, we will describe the following: +Read more about ZMQ link:http://zguide.zeromq.org/page:all[here] -* *Transport Layer* - The transport layer used to communicate with TRex server -* *RPC Reprensentation Protocol* - The format in which remote procedures are carried -=== Transport Layer +image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/Scapy_JSON_rpc_server.png"] -TRex server transport layer is implemented using ZMQ. +=== Error Codes -The default configuration is TCP on port 5555, however this is configurable. +Error codes are given according to this table: [also follows the JSON-RPC spec, with added error codes] -{zwsp} + -The communication model is based on the request-reply ZMQ model: - -http://zguide.zeromq.org/page:all#Ask-and-Ye-Shall-Receive - -{zwsp} + - -for more on ZMQ and implementation please refer to: -{zwsp} + -http://zeromq.org/intro:read-the-manual - -=== RPC Reprensentation Protocol - -The RPC reprensentation protocol is JSON RPC v2.0. -Every request and response will be encoded in a JSON RPC v2.0 format. - -{zwsp} + +[options="header",cols="^1,^h,3a"] +|================= +| Error Code | Message | Meaning +| -32700 | Parse Error | Invalid JSON was received by the server. An error occurred on the server while parsing the JSON text. +| -32600 | Invalid Request | The JSON sent is not a valid Request object. 
+| -32601 | Method not found | The method does not exist / is not available +| -32603 | Invalid params | Invalid method parameter(s) +| -32097 | Syntax Error | Syntax Error in input +| -32098 | Scapy Server: message | Scapy Server had an error while executing your command, described in the message given +| -32096 | Scapy Server: Unknown Error | Scapy Server encountered an error that cannot be described -For more info on JSON RPC v2.0 spec please refer to: -{zwsp} + -http://www.jsonrpc.org/specification +|================= -{zwsp} + -Later on in the document we will describe all the supported commands. -=== TRex Console +== Data Bases and Data Structures used in Scapy Server +=== Protocol Field Description +This data sturcture contains the name of the field, its type and the default value assigned. + + + +Has the following structure: + -To debug RPC it is possible to enable verbose command from Console see link:draft_trex_stateless.html#_console_commands[here] -On the 'client' side: +(field name, field type, default value) + +[example] +==== +*Example:* + +this is the 'dst' field for the 'Ether' protocol [source,bash] ---- -TRex > verbose on - -verbose set to on - -TRex > ping - --> Pinging RPC server -[verbose] Sending Request To Server: - -{ - "id": "l0tog11a", - "jsonrpc": "2.0", - "method": "ping", - "params": null -} - -[verbose] Server Response: - -{ - "id": "l0tog11a", - "jsonrpc": "2.0", - "result": {} -} - -[SUCCESS] +["dst","MACField","('00:00:00:01:00:00')"] ---- +==== -== RPC Server Component Position Illustration - -The following diagram illustres the RPC server component's place: - -image::images/rpc_server_big_picture.png[title="RPC Server Position",align="left",width=800, link="images/rpc_server_big_picture.png"] - -== RPC Server Port State Machine -Any port on the server can be in numbered of states, each state provides other subset of the commands -that are allowed to be executed. 
- -We define the following possible states: - -* *unowned* - The specific port is either unowned or another user is owning the port -* *owned* - The specific port has been acquired by the client -* *active* - The specific port is in the middle of injecting traffic - currently active - -Each port command will specify on which states it is possible to execute it. - -For port related commands valid only on 'owned' or 'active', a field called ''handler'' 'MUST' be passed -along with the rest of the parameters. - +=== Offsets Dictionary and Offset Entry +==== The *"Offset Entry"* data sturcture contains the offset of a field within the *layer*, and its size. + +(both measured in Bytes) + + + + +Has the following structure: + + + +[field offset (within the layer), field size] + -This will identify the connection: -image::images/rpc_states.png[title="Port States",align="left",width=150, link="images/rpc_states.png"] +[example] +==== +*Example:* + +This is the 'src' field for the 'IP' protocol: + +the offset within the layer is 16 bytes, and the size of the field is 4 bytes (as defined in the IP spec) +[source,bash] +---- +'dst': [16, 4] +---- +==== -== Data Bases and Data Structures used in Scapy Server -=== Protocol Field Description -This data sturcture contains the name of the field, its type and the default value assigned. + +==== The *"Offsets Dictionary"* data sturcture simply maps the offsets for each layer according to name. 
+ +Has the following structure: + + -has the following structure: + -(field name, field type, default value) + + 'field name' : [ field offset, field size ] // i.e Offset entry + -Example: -this is the 'dst' field for the 'Ether' protocol + + +[example] +==== +*Example:* + +This is the Offsets Dictionary for the IP layer: + [source,bash] ---- -["dst","MACField","('00:00:00:01:00:00')"] - +'offsets': {'IP': {'chksum': [10, 2], + 'dst': [16, 4], + 'flags': [6, 0], + 'frag': [6, 0], + 'global_offset': 0, + 'id': [4, 2], + 'ihl': [0, 0], + 'len': [2, 2], + 'options': [20, 2], + 'proto': [9, 1], + 'src': [12, 4], + 'tos': [1, 1], + 'ttl': [8, 1], + 'version': [0, 0] + } + } ---- +==== + +Each layer has a 'global_offset' key. this key represents the *offset of the layer within the packet*. + +In the example above, the IP layer starts at offset 0, and the field src is at offset 12 within the packet. + +In the general case, a field's offset within the *packet* is calculated this way: + + 'global_offset' + 'field_offset' === Protocol Dictionary @@ -166,7 +153,10 @@ The protocol dictionary contains the names for all supported protocols and layer Each entry in this data base has the following format: + 'Protocol Name' : 'Protocol Field Description' + + -Example: + +[example] +==== +*Example*: + [source,bash] ---- { "Ether":[ @@ -191,7 +181,7 @@ Example: . } ---- - +==== === Fields Dictionary The fields dictionary contains mapping between a field's name and its regular expression, + Which has the following structure: + @@ -217,9 +207,46 @@ The dictionary maintains its regular structure: == RPC Commands The following RPC commands are supported. please refer to data bases section for elaboration on given data bases +=== Supported Methods +* *Name* - supported_methods +* *Description* - returns the list of all supported methods by Scapy Server and their parameters +* *Parameters* - a single empty string ('') will return *ALL* supported methods. 
+ + other string delivered as parameter will return True/False if the string matches a supported method name +* *Result* - according to input: empty string will return list of supported methods, otherwise will return True/False as mentioned + +*Example:* + +[source, bash] +---- +'Request': +{ + "jsonrpc": "2.0", + "id": "1", + "method": "supported_methods", + "params": [""] +} + +'Result': +{'id': '1', + 'jsonrpc': '2.0', + 'result': { . + . + . + . + 'build_pkt': [1, [u'pkt_descriptor']], + 'check_update': [2, [u'db_md5', u'field_md5']], + 'get_all': [0, []], + 'get_tree': [0, []], + 'get_version': [0, []], + 'supported_methods': [1, [u'method_name']] + } +} +---- + + + === GetAll * *Name* - 'get_all' -* *Valid States* - 'not relevant' * *Description* - Returns the supported protocols library (DB) and Field-to-RegEx mapping library, and their MD5 * *Paramters* - None * *Result* ['object'] - JSON format of dictionary. see table below @@ -230,70 +257,94 @@ The following RPC commands are supported. 
please refer to data bases section for | Key | Key Type | Value | Value Type | db | string | supported protocols dictionary | protocol dictionary | fields | string | Field-to-RegEx dictionary | Field-to-RegEx dictionary -| DB_md5 | string | MD5 of DB | JSON encoded in base64 -| fields_md5 | string | MD5 of fields | JSON encoded in base64 +| db_md5 | string | MD5 of DB | encoded in base64 +| fields_md5 | string | MD5 of fields | encoded in base64 |================= -Example: +*Example:* [source,bash] ---- 'Request': - { "jsonrpc": "2.0", "id": 1, "method": "get_all", - "params": null + "params": [] } 'Response': - { "jsonrpc" : "2.0", "id" : 1, - "result" : '{"fields_md5": "\\"oO1qiSnnm2SdORUM7Ca/Aw==\\\\n\\"", "fields": {"IP6Field": "empty", "NTPTimestampField": "empty", "XShortEnumField": "empty", "BitField": "empty", "TruncPktLenField": "empty", "ByteField": "empty", "Emph": "empty", "NIReplyDataField": "empty", "IPField": "empty", "StrLenField": "empty", "ShortEnumField": "empty", "FieldLenField": "empty", "ConditionalField": "empty", "XShortField": "empty", "XByteField": "empty", "ARPSourceMACField": "empty", "_HopByHopOptionsField": "empty", "NIQueryCodeF.......}' + "result" : {'db': {'ARP': [('hwtype', 'XShortField', '(1)'), + ('ptype', 'XShortEnumField', '(2048)'), + ('hwlen', 'ByteField', '(6)'), + ('plen', 'ByteField', '(4)'), + ('op', 'ShortEnumField', '(1)'), + ('hwsrc', 'ARPSourceMACField', '(None)'), + ('psrc', 'SourceIPField', '(None)'), + ('hwdst', 'MACField', "('00:00:00:00:00:00')"), + ('pdst', 'IPField', "('0.0.0.0')")], + . + . + . + 'db_md5': 'Z+gRt88y7SC0bDu496/DQg==\n', + 'fields': {'ARPSourceMACField': 'empty', + 'BCDFloatField': 'empty', + 'BitEnumField': 'empty', + . + . + . 
+ } ---- === Check if DataBase is updated * *Name* - 'check_update' -* *Valid States* - 'not relevant' * *Description* - checks if both protocol database and fields database are up to date according to md5 comparison -* *Paramters* - md5 of database, md5 of fields in *JSON format encoded base64* -* *Result* ['object'] - Array of 2 objects of the following 3 tuple: ('result','error code','error description'), each for every database. in JSON format - -Example: +* *Paramters* - md5 of database, md5 of fields +* *Result* - upon failure: error code -32098 (see link:trex_scapy_rpc_server.html#_error_codes[RPC server error codes]) + + followed by a message: "Fields DB is not up to date" or "Protocol DB is not up to date" + + upon success: return 'true' as result (see below) + + + +*Example:* [source,bash] ---- 'Request': { - "jsonrpc": "2.0", - "id": 1, - "method": "check_update", - "params": { - "dbMD5": "'IlM5OXY3M2cxYUlEalNYcENhdmlmWGc9PVxuIg==\n'" - "fieldMD5": "'InMzdzBSaXAvQjFuK3p1ajF0NFcwbmc9PVxuIg==\n'" - } + "jsonrpc": "2.0", + "id": "1", + "method": "check_update", + "params": ["md5_of_protocol_db", "md5_of_fields"] } -'Response': +'Response': //on failure { - "jsonrpc": "2.0", - "id": 1, - "result": '[["Fail", -1, "Field DB is not up to date"], ["Success", 0, "None"]]' + "jsonrpc": "2.0", + "id": "1", + "error": { + "code": -32098, + "message:": "Scapy Server: Fields DB is not up to date" + } } +'Response': //on success + +{ + "jsonrpc": "2.0", + "id": "1", + "result": true +} ---- === Get Version * *Name* - 'get_version' -* *Valid States* - 'not relevant' * *Description* - Queries the server for version information * *Paramters* - None * *Result* ['object'] - See table below @@ -302,51 +353,62 @@ Example: [options="header",cols="1,1,3"] |================= | Field | Type | Description -| version | string | TRex version -| build_date | string | build date -| build_time | string | build time +| version | string | Scapy Server version | built_by | string | who 
built this version |================= + +*Example:* [source,bash] ---- 'Request': { - "id": "wapkk8m6", - "jsonrpc": "2.0", - "method": "get_version", - "params": null + "jsonrpc": "2.0", + "id": "1", + "method": "get_version", + "params": [] } + 'Response': { - "id": "wapkk8m6", - "jsonrpc": "2.0", - "result": { - "build_date": "Sep 16 2015", - "build_time": "12:33:01", - "built_by": "imarom", - "version": "v0.0" - } + "jsonrpc": "2.0", + "id": "1", + "result": { + "version": "v1.0", + "built_by": "itraviv" + } } ---- === Build Packet * *Name* - 'build_pkt' -* *Description* - Takes a JSON format string of a SCAPY packet and returns: + -*1)* Result of packet assembly. + -*2)* The show2 of the packet: detailed packet description (see SCAPY manual for more details). + -*3)* Buffer of the packet: hexdump of the given packet *encoded in 'base64'*. + -* *Paramters* - JSON string describing SCAPY packet -* *Result* ['object'] - JSON format string: + - Upon Success returns: [ Result, show2data, bufferData ] + - Upon Failure returns: [[ Pkt build Failed, ErrorType, ErrorDescription], [] ,[]] + +* *Description* - Takes a JSON format string of a SCAPY packet. 
+ +* *Return Value* - See table below +* *Paramters* - string describing SCAPY packet +* *Result* ['dictionary'] - a dictionary that contains: + +* pkt buffer (Hexdump encoded in base64) + +* pkt offsets - each field within the packet has its offset within the layer, and the field size + + the value returned is [ 'field offset' , 'field size' ] + +* pkt show2 - a detailed description of each field and its value + + + +.Object type 'return values for build_pkt' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| pkt buffer | Hexdump encoded in base64 | The packet's dump +| pkt offsets | Dictionary of layers | Each layer contains it's offsets within the layer, and a global offset within the packet +| pkt show2 | Dictionary of layers | Each layer is a dictionary of fields, which contains the values for each field +|================= +*Example:* + Successful assembly of a packet: + [source,bash] ---- @@ -354,56 +416,94 @@ Successful assembly of a packet: + 'Request': { - "id": "zweuldlh", + "id": "1", "jsonrpc": "2.0", "method": "build_pkt", - "params": "Ether()/IP(src='127.0.0.1')/TCP(sport=80)" + "params": ["Ether()/IP(src='127.0.0.1')/TCP(sport=80)"] } 'Response': { - "id": "zweuldlh", - "jsonrpc": "2.0", - "result": { - '[["Success", 0, "None"], //result - - "\\"###[ Ethernet ]### //show2 data - \\\\n dst = 00:00:00:01:00:00 - \\\\n src = 00:00:00:02:00:00 - \\\\n type= IPv4 - \\\\n###[ IP ]### - \\\\n version = 4L - \\\\n ihl = 5L - \\\\n tos = 0x0 - \\\\n len = 40 - \\\\n id = 1 - \\\\n flags = - \\\\n frag = 0L - \\\\n ttl = 64 - \\\\n proto = tcp - \\\\n chksum = 0xcbcd - \\\\n src = 127.0.0.1 - \\\\n dst = 48.0.0.1 - \\\\n \\\\\\\\options \\\\\\\\ - \\\\n ###[ TCP ]### - \\\\n sport = 80 - \\\\n dport = 80 - \\\\n seq = 0 - \\\\n ack = 0 - \\\\n dataofs = 5L - \\\\n reserved = 0L - \\\\n flags = S - \\\\n window = 8192 - \\\\n chksum = 0xe040 - \\\\n urgptr = 0 - \\\\n options = {} - \\\\n\\"", - //buffer data: - 
"\\"AAAAAQAAAAAAAgAACABFAAAoAAEAAEAGy81/AAABMAAAAQBQAFAAAAAAAAAAAFACIADgQAAA\\\\n\\""]' - } + 'id': '1', + 'jsonrpc': '2.0', + 'result': { 'buffer': 'AAAAAQAAAAAAAgAACABFAAAoAAEAAEAGy81/AAABMAAAAQBQAFAAAAAAAAAAAFACIADgQAAA\n', + 'offsets':{ + 'Ether': { + 'dst': [0, 6], + 'global_offset': 0, + 'src': [6, 6], + 'type': [12, 2] + }, + 'IP': { + 'chksum': [10, 2], + 'dst': [16, 4], + 'flags': [6, 0], + 'frag': [6, 0], + 'global_offset': 14, + 'id': [4, 2], + 'ihl': [0, 0], + 'len': [2, 2], + 'options': [20, 2], + 'proto': [9, 1], + 'src': [12, 4], + 'tos': [1, 1], + 'ttl': [8, 1], + 'version': [0, 0] + }, + 'TCP': { + 'ack': [8, 4], + 'chksum': [16, 2], + 'dataofs': [12, 0], + 'dport': [2, 2], + 'flags': [13, 0], + 'global_offset': 34, + 'options': [20, 2], + 'reserved': [12, 0], + 'seq': [4, 4], + 'sport': [0, 2], + 'urgptr': [18, 2], + 'window': [14, 2] + } + }, + 'show2': { + 'Ethernet': { + 'dst': '00:00:00:01:00:00', + 'src': '00:00:00:02:00:00', + 'type': '0x800' + }, + 'IP': { + 'chksum': '0xcbcd', + 'dst': '48.0.0.1', + 'flags': '', + 'frag': '0L', + 'id': '1', + 'ihl': '5L', + 'len': '40', + 'proto': 'tcp', + 'src': '127.0.0.1', + 'tos': '0x0', + 'ttl': '64', + 'version': '4L' + }, + 'TCP': { + 'ack': '0', + 'chksum': '0xe040', + 'dataofs': '5L', + 'dport': '80', + 'flags': 'S', + 'options': '{}', + 'reserved': '0L', + 'seq': '0', + 'sport': '80', + 'urgptr': '0', + 'window': '8192' + } + } + } } + ---- Unsuccessful assembly of a packet: + @@ -422,113 +522,52 @@ Unsuccessful assembly of a packet: + 'Response': { - "id": "zweuldlh", - "jsonrpc": "2.0", - "result": { - '[["Pkt build Failed", "", "name \'ETHER\' is not defined"], [], []]' - } + 'id': 'zweuldlh', + 'jsonrpc': '2.0', + 'error': { + 'code': -32098, + 'message:': "Scapy Server: unsupported operand type(s) for -: 'Ether' and 'IP'" + } } - + ---- -=== Get offsets of fields inside a given packet -* *Name* - 'get_all_pkt_offsets' -* *Description* - Returns offset and size for each field inside the given 
packet -* *Paramters* - JSON string describing SCAPY packet -* *Result* ['Array'] - JSON format Array of 2 objects: + -*1)* Result object: (Result, ErrorType, ErrorDescription) '(when successful returns Success,0,None)' + -*2)* Dictionary of offsets per layer: each layer holds an array of field names and offsets + -'(when unsuccesful, returns an empty dictionary)' + - + -* Object describing field is formatted this way: ['field name','offset in layer','size in bytes'] + - -Successful call: -[source,bash] ----- - -'Request': - -{ - "id": "pbxny90u", - "jsonrpc": "2.0", - "method": "get_all_pkt_offsets", - "params": 'IP()' -} - -'Response': +=== Get protocol tree hierarchy example +* *Name* - 'get_tree' +* *Description* - returns a dictionary of protocols ordered in an hierarchy tree + +* *Paramters* - none +* *Result* [dictionary] - example for packet layers that can be used to build a packet. ordered in an hierarchy tree -{ - "id": "pbxny90u", - "jsonrpc": "2.0", - "result": {'[ - ["Success", 0, "None"], - { - "IP()": - [["version", 0, 0], ["ihl", 0, 0], ["tos", 1, 1], - ["len", 2, 2], ["id", 4, 2], ["flags", 6, 0], - ["frag", 6, 0], ["ttl", 8, 1], ["proto", 9, 1], - ["chksum", 10, 2], ["src", 12, 4], ["dst", 16, 4], - ["options", 20, 2]] - } - ]' - } -} +*Example:* ----- -Unsuccessful call: [source,bash] ---- 'Request': -{ - "id": "pbxny90u", - "jsonrpc": "2.0", - "method": "get_all_pkt_offsets", - "params": 'IP()-ether~' //not a valid SCAPY packet string +{ + "id": "1", + "jsonrpc": "2.0", + "method": "get_tree", + "params": [] } + 'Response': -{ - "id": "pbxny90u", - "jsonrpc": "2.0", - "result": { - '[ - ["Pkt build Failed", "", - "unexpected EOF while parsing (, line 1)"], - {} - ]' - } +{'id': '1', + 'jsonrpc': '2.0', + 'result': {'ALL': { + 'Ether': {'ARP': {}, + 'IP': { 'TCP': {'RAW': 'payload'}, + 'UDP': {'RAW': 'payload'} + } + } + } + } } - ---- -=== Get protocol tree hierarchy example -* *Name* - 'get_tree' -* *Description* - returns a JSON string of 
protocols ordered in an hierarchy tree + -* *Paramters* - none -* *Result* ['JSON string'] - JSON string of hierarchy tree for printing - -[source,bash] ----- - -'Request': - -{ - "id": "b1tr56yz", - "jsonrpc": "2.0", - "method": "get_tree", - "params": null -} -'Response': -{ - "id": "b1tr56yz", - "jsonrpc": "2.0", - "result": "'"ALL\\n\\tEther\\n\\t\\tARP\\n\\t\\tIP\\n\\t\\t\\tUDP\\n\\t\\t\\t\\tRaw\\n\\t\\t\\tTCP\\n\\t\\t\\t\\tRaw\\n"'" -} - ----- -- cgit 1.2.3-korg From 6c2b5cf3d8da0f7912100e7f45e87b0c24112586 Mon Sep 17 00:00:00 2001 From: itraviv Date: Thu, 18 Aug 2016 17:30:58 +0300 Subject: rpc server asciidoc: added usage section (still in progress) image: removed TRex background changed filename --- images/scapy_json_rpc_server.png | Bin 0 -> 172162 bytes trex_scapy_rpc_server.asciidoc | 43 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100755 images/scapy_json_rpc_server.png diff --git a/images/scapy_json_rpc_server.png b/images/scapy_json_rpc_server.png new file mode 100755 index 00000000..87050884 Binary files /dev/null and b/images/scapy_json_rpc_server.png differ diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index bf771d4c..19334718 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -47,7 +47,7 @@ The server operates on a Request-Response basis *over ZMQ*, and does not support Read more about ZMQ link:http://zguide.zeromq.org/page:all[here] -image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/Scapy_JSON_rpc_server.png"] +image::images/scapy_json_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/Scapy_JSON_rpc_server.png"] === Error Codes @@ -569,5 +569,46 @@ Unsuccessful assembly of a packet: + ---- +== Usage of Scapy RPC Server +Notice the existance of the following files: + +* scapy_service.py +* scapy_zmq_server.py +* scapy_zmq_client.py + +=== 
Scapy_zmq_server.py +In this section we will see how to bring up the Scapy ZMQ server. +There are 2 ways to run this server: + +* Through command line +* Through Python interpreter + +==== Running Scapy ZMQ Server from command line +Run the file scapy_zmq_server.py with the argument -s to declare the port that the server will listen to. + +Running the file without the "-s" argument will use *port 4507 by default*. + + + +Notice: + +* The Server's IP will be the IP address of the local host. +* The Server will accept requests from *any* IP address on that port. + +[source,bash] +---- +user$ python scapy_zmq_server.py -s 5555 + +***Scapy Server Started*** +Listening on port: 5555 +Server IP address: 10.0.0.1 + +---- + +==== Running Scapy ZMQ Server from the Python interpreter +Run the Python Interpreter (Scapy Server currently supports Python2) + + + + + + -- cgit 1.2.3-korg From f8444703c3faa34c31d5411e90610a46e9913589 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Aug 2016 17:50:41 +0300 Subject: add FAQ questions --- trex_faq.asciidoc | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index da20711e..f1f0859f 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -223,6 +223,45 @@ After stretching TRex to its maximum CPS capacity, consider the following: DUT w In order to artificialy increse the length of the active flows in TRex, you can config larger IPG in the YAML file. This will cause each flow to last longer. Alternatively, you can increase IPG in your PCAP file as well. +==== How do I support more active flows? +The default maximum supported flows are 1M total (TRex prospective). DUT could have much more due to aging. When active flows are more than 1M flows there is message that there is no enough memory. 
+ +[source,Python] +-------- +Active-flows : 1045562 Clients : 80120 Socket-util : 0.0207 % +-------- + +Look link:trex_manual.html#_memory_section_configuration[here] + +This example support 10M flows + +[source,Python] +-------- +- port_limit : 2 + version : 2 + interfaces : ['04:00.0', '0c:00.0'] # list of the interfaces to bind run ./dpdk_nic_bind.py --status + port_info : # set eh mac addr + + - dest_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x84] + src_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x83] + + - dest_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x83] + src_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x84] + + memory : + dp_flows : 10048576 <1> +-------- +<1> 10M flows + + +==== ERROR The number of ips should be at least number of threads +The range of clients and servers should be at least the number of threads. +The number of threads is equal (dual_ports) * (-c value) + +==== Incoming frames are from type SCTP why? +Default latency packets are SCTP, you can remove `-l 1000` or change it to ICMP see manual for more info + + === Stateless ==== How do I get started with stateless mode? -- cgit 1.2.3-korg From 6bf9862abc8983a02529d4a4f8b332d915442197 Mon Sep 17 00:00:00 2001 From: itraviv Date: Thu, 18 Aug 2016 17:58:12 +0300 Subject: finished section of scapy_server usage --- trex_scapy_rpc_server.asciidoc | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index 19334718..505e7124 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -603,9 +603,36 @@ Server IP address: 10.0.0.1 ---- ==== Running Scapy ZMQ Server from the Python interpreter -Run the Python Interpreter (Scapy Server currently supports Python2) +* Run the Python Interpreter (Scapy Server currently supports Python2) +* Import the scapy_zmq_server.py file +* Create a Scapy_server Object with argument as port number. default argument is port 4507 +* Invoke method activate. 
this is blocking because the server is listening on the port +[source,bash] +---- +user$ python +>>> from scapy_zmq_server import * +>>> s = Scapy_server() // starts with port 4507 +>>> s = Scapy_server(5555) //starts with port 5555 +>>> s.activate() +***Scapy Server Started*** +Listening on port: 5555 +Server IP address: 10.0.0.1 + +---- + +==== Shutting down Scapy ZMQ Server +There are 2 ways to shut down the server: + +* The server can be shut down using the keyboard interrupt Ctrl+C +* The server can be shut down remotely with the method "shut_down" with no arguments +[source,bash] +---- +//Sending Request: {"params": [], "jsonrpc": "2.0", "method": "shut_down", "id": "1"} +//Will result in this print by the server: +Server: Shut down by remote user +---- -- cgit 1.2.3-korg From d05da98a3cd4f5587e8cc7265e634c12ba5e03c6 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Aug 2016 21:08:26 +0300 Subject: update supported OS --- trex_book.asciidoc | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 0ff41543..806e2697 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -185,17 +185,12 @@ VMXNET3 (see notes) | VMware paravirtualized | Connect using VMware vSwitch [IMPORTANT] ===================================== -* For VMXNET3, use Ubuntu. Fedora 18 is not supported and causes crash. * Intel SFP+ 10Gb/sec is the only one supported by default on the standard Linux driver. TRex also supports Cisco 10Gb/sec SFP+. // above, replace "only one" with "only mode"? * For operating high speed throughput (example: several Intel XL710 40Gb/sec), use different link:https://en.wikipedia.org/wiki/Non-uniform_memory_access[NUMA] nodes for different NICs. 
+ To verify NUMA and NIC topology: `lstopo (yum install hwloc)` + To display CPU info, including NUMA node: `lscpu` + NUMA usage xref:numa-example[example] -* Using Intel XL710 with Fedora 18 requires updating kernel: -** `> sudo yum update kernel` -** `> sudo yum update kernel-devel` -** `> sudo yum update kernel-headers` * For Intel XL710 NICs, verify that the NVM is v5.04 . xref:xl710-firmware[Info]. ** `> sudo ./t-rex-64 -f cap2/dns.yaml -d 0 *-v 6* --nc | grep NVM` + `PMD: FW 5.0 API 1.5 NVM 05.00.04 eetrack 800013fc` @@ -232,11 +227,11 @@ NOTE: Purchase the 10Gb/sec SFP+ separately. Cisco would be fine with TRex (but ==== Supported versions Supported Linux versions: -* Fedora 18-20, 64-bit kernel (not 32-bit) +* Fedora 20-23, 64-bit kernel (not 32-bit) * Ubuntu 14.04.1 LTS, 64-bit kernel (not 32-bit) +* Ubuntu 16.xx LTS, 64-bit kernel (not 32-bit) NOTE: Additional OS version may be supported by compiling the necessary drivers. -// we should indicate exactly which drivers this means To check whether a kernel is 64-bit, verify that the ouput of the following command is `x86_64`. 
@@ -255,10 +250,6 @@ ISO images for supported Linux releases can be downloaded from: [options="header",cols="1^,2^",width="50%"] |====================================== | Distribution | SHA256 Checksum -| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-DVD.iso[Fedora 18] - | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-CHECKSUM[Fedora 18 CHECKSUM] -| link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-DVD.iso[Fedora 19] - | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/19/Fedora/x86_64/iso/Fedora-19-x86_64-CHECKSUM[Fedora 19 CHECKSUM] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-DVD.iso[Fedora 20] | link:http://archives.fedoraproject.org/pub/archive/fedora/linux/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-CHECKSUM[Fedora 20 CHECKSUM] | link:http://fedora-mirror01.rbc.ru/pub/fedora/linux/releases/21/Server/x86_64/iso/Fedora-Server-DVD-x86_64-21.iso[Fedora 21] -- cgit 1.2.3-korg From ec76fb361c0eee31813b6c071b746b2369d0d829 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Aug 2016 22:02:52 +0300 Subject: v2.08 --- release_notes.asciidoc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 7ecce8ef..32382414 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,6 +23,25 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 2.08 == + +* Scapy server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] +* Client.start Python API supports Core mask - significantly improve the Stateless performance. TBD- Itay to update the pointer here +* Upgrade the ./dpdk_setup_ports.py script. 
It simplifies the way to create first time *optimized* config file (/etc/trex_cfg.yaml) + +[source,bash] +---- +$sudo ./dpdk_setup_ports.py script -t # show the list of ports +$sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg.yaml file +---- + +* Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. + +=== fix issues: === + +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-240[trex-240] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-246[trex-246] + == Release 2.07 == * DPDK 16.07 -- cgit 1.2.3-korg From ff049e3629323f7549d105682af1de3db8d91765 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Aug 2016 22:22:02 +0300 Subject: v2.08 version --- release_notes.asciidoc | 6 ++++-- trex_book.asciidoc | 1 + trex_index.asciidoc | 4 +++- trex_rpc_server_spec.asciidoc | 7 +++---- trex_scapy_rpc_server-docinfo.html | 6 ++++++ trex_scapy_rpc_server.asciidoc | 26 ++++++-------------------- ws_main.py | 4 ++-- 7 files changed, 25 insertions(+), 29 deletions(-) create mode 100644 trex_scapy_rpc_server-docinfo.html diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 32382414..039e0445 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -25,8 +25,8 @@ endif::backend-docbook[] == Release 2.08 == -* Scapy server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] -* Client.start Python API supports Core mask - significantly improve the Stateless performance. TBD- Itay to update the pointer here +* Scapy JSON-RPC server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] +* Client.start Python API supports Core mask - significantly improve the Stateless performance. link:cp_stl_docs/_modules/trex_stl_lib/trex_stl_client.html#STLClient.start[start API], *TBD*- Itay to update the pointer here * Upgrade the ./dpdk_setup_ports.py script. 
It simplifies the way to create first time *optimized* config file (/etc/trex_cfg.yaml) [source,bash] @@ -35,6 +35,8 @@ $sudo ./dpdk_setup_ports.py script -t # show the list of ports $sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg.yaml file ---- +*Yaroslav* TBD + * Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. === fix issues: === diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 806e2697..0c621d8a 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -227,6 +227,7 @@ NOTE: Purchase the 10Gb/sec SFP+ separately. Cisco would be fine with TRex (but ==== Supported versions Supported Linux versions: + * Fedora 20-23, 64-bit kernel (not 32-bit) * Ubuntu 14.04.1 LTS, 64-bit kernel (not 32-bit) * Ubuntu 16.xx LTS, 64-bit kernel (not 32-bit) diff --git a/trex_index.asciidoc b/trex_index.asciidoc index 158119c5..b2f3b1fd 100644 --- a/trex_index.asciidoc +++ b/trex_index.asciidoc @@ -80,7 +80,9 @@ link:../client_gui/[stateful GUI] |================= | Description | Name | Stateless server RPC specification | -link:trex_rpc_server_spec.html[server.html] +link:trex_rpc_server_spec.html[stl_rpc_server.html] +| Scapy server RPC specification | +link:trex_scapy_rpc_server.html[scapy_rpc_spec.html] | How to build | link:https://github.com/cisco-system-traffic-generator/trex-core/wiki[Wiki] |================= diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 9e81c53e..7d016112 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -83,14 +83,13 @@ http://zeromq.org/intro:read-the-manual The RPC reprensentation protocol is JSON RPC v2.0. Every request and response will be encoded in a JSON RPC v2.0 format. -{zwsp} + - +{zwsp}+ For more info on JSON RPC v2.0 spec please refer to: -{zwsp} + +{zwsp}+ http://www.jsonrpc.org/specification -{zwsp} + +{zwsp}+ Later on in the document we will describe all the supported commands. 
diff --git a/trex_scapy_rpc_server-docinfo.html b/trex_scapy_rpc_server-docinfo.html new file mode 100644 index 00000000..6fb66a5e --- /dev/null +++ b/trex_scapy_rpc_server-docinfo.html @@ -0,0 +1,6 @@ + + + + + + diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index bf771d4c..5c462726 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -9,18 +9,7 @@ The TRex Scapy RPC Server :local_web_server_url: csi-wiki-01:8181/trex :toclevels: 4 -++++ - -++++ +include::trex_ga.asciidoc[] == Change log @@ -35,9 +24,7 @@ The TRex Scapy RPC Server == Audience of this document -Anyone who wants to create,edit and assemble packets for TRex - - +TRex GUI guys == Scapy RPC Server - Overview Scapy Server is implemented following the link:http://www.jsonrpc.org/specification[JSON-RPC 2.0 specification], + @@ -46,7 +33,6 @@ The server operates on a Request-Response basis *over ZMQ*, and does not support Read more about ZMQ link:http://zguide.zeromq.org/page:all[here] - image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/Scapy_JSON_rpc_server.png"] === Error Codes @@ -78,7 +64,7 @@ Has the following structure: + (field name, field type, default value) + -[example] +.Example ==== *Example:* + this is the 'dst' field for the 'Ether' protocol @@ -99,7 +85,7 @@ Has the following structure: + [field offset (within the layer), field size] + -[example] +.Example ==== *Example:* + This is the 'src' field for the 'IP' protocol: + @@ -117,7 +103,7 @@ Has the following structure: + + -[example] +.Example ==== *Example:* + This is the Offsets Dictionary for the IP layer: + @@ -154,7 +140,7 @@ Each entry in this data base has the following format: + 'Protocol Name' : 'Protocol Field Description' + + -[example] +.Example ==== *Example*: + [source,bash] diff --git a/ws_main.py b/ws_main.py index 930cbc34..2a534dfc 100755 --- a/ws_main.py +++ b/ws_main.py @@ -915,6 +915,8 @@ def 
build(bld): bld(rule=convert_to_html_toc_book, source='trex_rpc_server_spec.asciidoc waf.css', target='trex_rpc_server_spec.html',scan=ascii_doc_scan); + bld(rule=convert_to_html_toc_book, + source='trex_scapy_rpc_server.asciidoc waf.css', target='trex_scapy_rpc_server.html',scan=ascii_doc_scan); bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='vm_doc.asciidoc waf.css', target='vm_doc.html', scan=ascii_doc_scan) @@ -922,8 +924,6 @@ def build(bld): bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='packet_builder_yaml.asciidoc waf.css', target='packet_builder_yaml.html', scan=ascii_doc_scan) - bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', - source='trex_scapy_rpc_server.asciidoc waf.css', target='trex_scapy_rpc_server.html', scan=ascii_doc_scan) bld(rule='${ASCIIDOC} -a stylesheet=${SRC[1].abspath()} -a icons=true -a toc2 -a max-width=55em -o ${TGT} ${SRC[0].abspath()}', source='trex_control_plane_design_phase1.asciidoc waf.css', target='trex_control_plane_design_phase1.html', scan=ascii_doc_scan) -- cgit 1.2.3-korg From e13ea5bb473caf427ff3bdabd42d1b3434ca6aa1 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 18 Aug 2016 22:38:33 +0300 Subject: RL update --- release_notes.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 039e0445..0877be90 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -38,6 +38,9 @@ $sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg. *Yaroslav* TBD * Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. 
+* Enforce latest firmware for XL710/X710 (5.04) +* Add a way to stop/close NICS at TRex termination (link would be down) `-close-at-end` +* IPv6 XL710 ICMP packets are supported now === fix issues: === -- cgit 1.2.3-korg From f02098de167be828a3ff72f28b4a5785d0579466 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Fri, 19 Aug 2016 02:37:28 +0300 Subject: script for creating trex_cfg.yaml info --- release_notes.asciidoc | 4 +--- trex_book.asciidoc | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 0877be90..e2c8385b 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -27,7 +27,7 @@ endif::backend-docbook[] * Scapy JSON-RPC server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] * Client.start Python API supports Core mask - significantly improve the Stateless performance. link:cp_stl_docs/_modules/trex_stl_lib/trex_stl_client.html#STLClient.start[start API], *TBD*- Itay to update the pointer here -* Upgrade the ./dpdk_setup_ports.py script. It simplifies the way to create first time *optimized* config file (/etc/trex_cfg.yaml) +* Upgrade the ./dpdk_setup_ports.py script. It simplifies the way to create first time *optimized* config file (/etc/trex_cfg.yaml). More info at the manual: link:trex_manual.html#_script_for_creating_config_file[Script for creating config file] [source,bash] ---- @@ -35,8 +35,6 @@ $sudo ./dpdk_setup_ports.py script -t # show the list of ports $sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg.yaml file ---- -*Yaroslav* TBD - * Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. 
* Enforce latest firmware for XL710/X710 (5.04) * Add a way to stop/close NICS at TRex termination (link would be down) `-close-at-end` diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 0c621d8a..b8de9f35 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -438,6 +438,58 @@ and example // where can we describe this limitation (TRex supports only one type of NIC at a time. You cannot mix different NIC types in one config file.) and other limitations? +==== Script for creating config file + +We provide a script to create configuration file by specifying as input desired interfaces. +Examples of using: + +* Get view of all ports as table: + +[source,bash] +---- +sudo ./dpdk_setup_ports.py -t +---- + +* Create config file with given interfaces. You can use PCI addresses or Linux IF (eth1 etc.): + +[source,bash] +---- +sudo ./dpdk_setup_ports.py -c 13:00.0 eth3 --dump -o /etc/trex_cfg.yaml +### Config file generated by dpdk_setup_ports.py ### + +- port_limit: 2 + version: 2 + interfaces: ['13:00.0', '1b:00.0'] + port_info: + - dest_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xda] # MAC OF LOOPBACK TO IT'S DUAL INTERFACE + src_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xd0] + - dest_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xd0] # MAC OF LOOPBACK TO IT'S DUAL INTERFACE + src_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xda] + + platform: + master_thread_id: 0 + latency_thread_id: 1 + dual_if: + - socket: 0 + threads: [2] +---- + +.Arguments of dpdk_setup_ports.py script +[options="header",cols="2,5,3",width="100%"] +|================= +| Arg | Description | Example +| -c | Create a configuration file by specified interfaces (PCI address or Linux names: eth1 etc.) | -c 03:00.1 eth1 eth4 84:00.0 +| --dump | Dump created config to screen. | +| -o | Output the config to this file. | -o /etc/trex_cfg.yaml +| --dest-macs | Destination MACs to be used in created yaml file per each interface. Without specifying the option, will be assumed loopback (0⇔1, 2⇔3 etc.) 
| --dest-macs 11:11:11:11:11:11 22:22:22:22:22:22 +| --ci | Cores include: White list of cores to use. Make sure there is enough for each NUMA. | --ci 0 2 4 5 6 +| --ce | Cores exclude: Black list of cores to exclude. Make sure there will be enough for each NUMA. | --ci 10 11 12 +| --no-ht | No HyperThreading: Use only one thread of each Core in created config yaml. | +| --prefix | Advanced option: prefix to be used in TRex config in case of parallel instances. | --prefix first_instance +| --zmq-pub-port | Advanced option: ZMQ Publisher port to be used in TRex config in case of parallel instances. | --zmq-pub-port 4000 +| --zmq-rpc-port | Advanced option: ZMQ RPC port to be used in TRex config in case of parallel instances. | --zmq-rpc-port +| --ignore-numa | Advanced option: Ignore NUMAs for config creation. Use this option only if you have to, as it might reduce performance. For example, if you have pair of interfaces at different NUMAs | +|================= ==== Run TRex -- cgit 1.2.3-korg From 9fe37bed2c2a0864cb114560b438f779c970cc3b Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Fri, 19 Aug 2016 09:49:09 +0300 Subject: release notes add issue242 note --- release_notes.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index e2c8385b..b6cd3485 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -43,6 +43,7 @@ $sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg. 
=== fix issues: === * link:http://trex-tgn.cisco.com/youtrack/issue/trex-240[trex-240] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-242[trex-242] * link:http://trex-tgn.cisco.com/youtrack/issue/trex-246[trex-246] == Release 2.07 == -- cgit 1.2.3-korg From 8c0e3d3af7cc1c4ae53c5457115d37578483021d Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Fri, 19 Aug 2016 11:41:23 +0300 Subject: dpdk_setup_ports fix example commands --- release_notes.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index b6cd3485..80c54803 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -31,8 +31,8 @@ endif::backend-docbook[] [source,bash] ---- -$sudo ./dpdk_setup_ports.py script -t # show the list of ports -$sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 # create optimum /etc/trex_cfg.yaml file +$sudo ./dpdk_setup_ports.py -t # show the list of ports +$sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 -o /etc/trex_cfg.yaml # create optimum /etc/trex_cfg.yaml file ---- * Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. 
-- cgit 1.2.3-korg From 188d26c676d198a1dc813ca25d685537a2f675bb Mon Sep 17 00:00:00 2001 From: itraviv Date: Sun, 21 Aug 2016 11:32:34 +0300 Subject: fixed compile warnings --- trex_scapy_rpc_server.asciidoc | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index 505e7124..b7d8c022 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -78,8 +78,7 @@ Has the following structure: + (field name, field type, default value) + -[example] -==== + *Example:* + this is the 'dst' field for the 'Ether' protocol [source,bash] @@ -87,7 +86,7 @@ this is the 'dst' field for the 'Ether' protocol ["dst","MACField","('00:00:00:01:00:00')"] ---- -==== + === Offsets Dictionary and Offset Entry ==== The *"Offset Entry"* data sturcture contains the offset of a field within the *layer*, and its size. + @@ -99,8 +98,7 @@ Has the following structure: + [field offset (within the layer), field size] + -[example] -==== + *Example:* + This is the 'src' field for the 'IP' protocol: + the offset within the layer is 16 bytes, and the size of the field is 4 bytes (as defined in the IP spec) @@ -108,7 +106,6 @@ the offset within the layer is 16 bytes, and the size of the field is 4 bytes (a ---- 'dst': [16, 4] ---- -==== ==== The *"Offsets Dictionary"* data sturcture simply maps the offsets for each layer according to name. + Has the following structure: + @@ -117,8 +114,7 @@ Has the following structure: + + -[example] -==== + *Example:* + This is the Offsets Dictionary for the IP layer: + [source,bash] @@ -140,7 +136,7 @@ This is the Offsets Dictionary for the IP layer: + } } ---- -==== + Each layer has a 'global_offset' key. this key represents the *offset of the layer within the packet*. + In the example above, the IP layer starts at offset 0, and the field src is at offset 12 within the packet. 
+ @@ -154,8 +150,7 @@ Each entry in this data base has the following format: + 'Protocol Name' : 'Protocol Field Description' + + -[example] -==== + *Example*: + [source,bash] ---- @@ -181,7 +176,7 @@ Each entry in this data base has the following format: + . } ---- -==== + === Fields Dictionary The fields dictionary contains mapping between a field's name and its regular expression, + Which has the following structure: + @@ -534,9 +529,10 @@ Unsuccessful assembly of a packet: + === Get protocol tree hierarchy example * *Name* - 'get_tree' -* *Description* - returns a dictionary of protocols ordered in an hierarchy tree + -* *Paramters* - none -* *Result* [dictionary] - example for packet layers that can be used to build a packet. ordered in an hierarchy tree +* *Description* - returns a *suggested* dictionary of protocols ordered in a hierarchy tree. + +User can still create non valid hierarchies. (such as Ether()/DNS()/IP()) +* *Parameters* - none +* *Result* [dictionary] - Example for packet layers that can be used to build a packet. Ordered in an hierarchy tree. *Example:* @@ -606,7 +602,7 @@ Server IP address: 10.0.0.1 * Run the Python Interpreter (Scapy Server currently supports Python2) * Import the scapy_zmq_server.py file * Create a Scapy_server Object with argument as port number. default argument is port 4507 -* Invoke method activate. this is blocking because the server is listening on the port +* Invoke method activate(). (This method is blocking because the server is listening on the port). 
[source,bash] ---- -- cgit 1.2.3-korg From 34151a07567c5eb2d8af0ee46e52de3c09718469 Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 21 Aug 2016 03:37:40 +0300 Subject: CORE MASK doc --- images/core_mask_pin.png | Bin 0 -> 75239 bytes images/core_mask_split.png | Bin 0 -> 77862 bytes trex_rpc_server_spec.asciidoc | 4 +- trex_stateless.asciidoc | 189 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 192 insertions(+), 1 deletion(-) create mode 100644 images/core_mask_pin.png create mode 100644 images/core_mask_split.png diff --git a/images/core_mask_pin.png b/images/core_mask_pin.png new file mode 100644 index 00000000..63893484 Binary files /dev/null and b/images/core_mask_pin.png differ diff --git a/images/core_mask_split.png b/images/core_mask_split.png new file mode 100644 index 00000000..7b0a456c Binary files /dev/null and b/images/core_mask_split.png differ diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 7d016112..15bfc97f 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -1115,6 +1115,7 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj * *Parameters* ** *handler* ['string'] - unique connection handler ** *port_id* ['int'] - port id on which to start traffic +** *core_mask* ['uint64'] [*optional*] - a non zero mask to specify which cores will be active during TX, if no value is provided, the value is all bits on (MAX_UINT64) * *Result* ['object'] - {} @@ -1128,9 +1129,10 @@ In case rx_stats feature is enabled, rx_object **must include** all rx_stats obj "jsonrpc": "2.0", "method": "start_traffic", "params": { - "api_h": "SPhoCDIV", + "api_h": "SPhoCDIV", "handler": "37JncCHr", "port_id": 3 + "core_mask": 0xff } 'Response': diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 88eed076..c55b6520 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -3673,6 +3673,195 @@ try: ---- +=== Performance Tweaking +In this section we 
provide some advanced features to help get the most of TRex performance. +The reason that those features are not active out of the box because they might have +some impact on other areas and in general, might sacrafice one or more properties +that requires the user to explicitly give up on those. + +==== Caching MBUFs +** + +==== Core masking per interface +By default, TRex will regard any TX command with a **greedy approach**: +All the DP cores associated with this port will be assigned in order to produce the maximum +throughput. + +image::images/core_mask_split.png[title="Greedy Approach - Splitting",align="left",width={p_width}, link="images/core_mask_split.png"] + +However, in some cases it might be beneficial to provide a port with a subset of the cores to use. + + +For example, when injecting traffic on two ports and the following conditions are met: + +* the two ports are adjacent +* the profile is symmetric + +Due to TRex architecture, adjacent ports (e.g. port 0 & port 1) shares the same cores, +and using the greedy approach will cause all the cores to transmit on both port 0 and port 1. + +When the profile is *symmetric* it will be wiser to pin half the cores to port 0 and half +the cores to port 1 and thus avoid cache trashing and bouncing. +If the profile is not symmetric, the static pinning may deny CPU cycles from the more congested port. + +image::images/core_mask_pin.png[title="Pinning Cores To Ports",align="left",width={p_width}, link="images/core_mask_pin.png"] + +TRex provides this in two ways: + + +==== Predefind modes + +As said above, the default mode is 'split' mode, but you can provide a predefined mode called 'pin'. 
+This can be done by both API and from the console: + +[source,bash] +---- + +trex>start -f stl/syn_attack.py -m 40mpps --total -p 0 1 --pin <-- provide '--pin' to the command + +Removing all streams from port(s) [0, 1]: [SUCCESS] + + +Attaching 1 streams to port(s) [0]: [SUCCESS] + + +Attaching 1 streams to port(s) [1]: [SUCCESS] + + +Starting traffic on port(s) [0, 1]: [SUCCESS] + +60.20 [ms] + +trex> + +---- + +[source,bash] +---- + +We can see in the CPU util. available from the TUI window, +that each core was reserverd for an interface: + +Global Stats: + +Total Tx L2 : 20.49 Gb/sec +Total Tx L1 : 26.89 Gb/sec +Total Rx : 20.49 Gb/sec +Total Pps : 40.01 Mpkt/sec <-- performance meets the requested rate +Drop Rate : 0.00 b/sec +Queue Full : 0 pkts + + +Cpu Util(%) + + Thread | Avg | Latest | -1 | -2 | -3 | -4 | -5 | -6 | -7 | -8 + + 0 (0) | 92 | 92 | 92 | 91 | 91 | 92 | 91 | 92 | 93 | 94 + 1 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 2 (1) | 96 | 95 | 95 | 96 | 96 | 96 | 96 | 95 | 94 | 95 + 3 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 4 (0) | 92 | 93 | 93 | 91 | 91 | 93 | 93 | 93 | 93 | 93 + 5 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 6 (1) | 88 | 88 | 88 | 88 | 88 | 88 | 88 | 88 | 87 | 87 + 7 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + +---- + + +If we had used the *default mode*, the table should have looked like this, and yield +much worse performance: + +[source,bash] +---- + +Global Stats: + +Total Tx L2 : 12.34 Gb/sec +Total Tx L1 : 16.19 Gb/sec +Total Rx : 12.34 Gb/sec +Total Pps : 24.09 Mpkt/sec <-- performance is quite low than requested +Drop Rate : 0.00 b/sec +Queue Full : 0 pkts + +Cpu Util(%) + + Thread | Avg | Latest | -1 | -2 | -3 | -4 | -5 | -6 | -7 | -8 + + 0 (0,1) | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 + 1 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 2 (0,1) | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 + 3 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 4 (0,1) | 100 | 100 | 100 
| 100 | 100 | 100 | 100 | 100 | 100 | 100 + 5 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 6 (0,1) | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 | 100 + 7 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + +---- + +This feature is also available from the Python API by providing: +*CORE_MASK_SPLIT* or *CORE_MASK_PIN* to the start API. + + +==== Manual mask +Sometimes for debug purposes or for a more advanced core scheduling you might want +to provide a manual masking that will guide the server on which cores to use. + +For example, let's assume we have a profile that utilize 95% of the traffic on one side, +and in the other direction it provides 5% of the traffic. +Let's assume also we have 8 cores assigned to the two interfaces. + +We want to assign 3 cores to interface 0 and 1 core only to interface 1. + +We can provide this line to the console (or for the API by providing a list of masks to the start +command): + +[source,bash] +---- +trex>start -f stl/syn_attack.py -m 10mpps --total -p 0 1 --core_mask 0xE 0x1 + +Removing all streams from port(s) [0, 1]: [SUCCESS] + + +Attaching 1 streams to port(s) [0]: [SUCCESS] + + +Attaching 1 streams to port(s) [1]: [SUCCESS] + + +Starting traffic on port(s) [0, 1]: [SUCCESS] + +37.19 [ms] + +trex> +---- + +The following output is received on the TUI CPU util window: + +[source,bash] +---- + +Total Tx L2 : 5.12 Gb/sec +Total Tx L1 : 6.72 Gb/sec +Total Rx : 5.12 Gb/sec +Total Pps : 10.00 Mpkt/sec +Drop Rate : 0.00 b/sec +Queue Full : 0 pkts + +Cpu Util(%) + + Thread | Avg | Latest | -1 | -2 | -3 | -4 | -5 | -6 | -7 | -8 + + 0 (1) | 45 | 45 | 45 | 45 | 45 | 45 | 46 | 45 | 46 | 45 + 1 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 2 (0) | 15 | 15 | 14 | 15 | 15 | 14 | 14 | 14 | 14 | 14 + 3 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 4 (0) | 14 | 14 | 14 | 14 | 14 | 14 | 14 | 14 | 15 | 14 + 5 (IDLE) | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + 6 (0) | 15 | 15 | 15 | 15 | 15 | 15 | 15 | 15 | 15 | 15 + 7 (IDLE) | 0 | 
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + +---- + === Reference Additional profiles and examples are available in the `stl/hlt` folder. -- cgit 1.2.3-korg From 34bc2c1df383f799342a43ebfdc588b95f04b6bd Mon Sep 17 00:00:00 2001 From: imarom Date: Sun, 21 Aug 2016 03:38:34 +0300 Subject: link to stateless in stateful mimicking --- trex_book_basic.asciidoc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index fd2db450..5c8af732 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -2721,7 +2721,10 @@ $.sudo /t-rex-64 -f cap2/imix_64.yaml -c 4 -m 1 -d 100 -l 1000 |======================== -=== Stateless traffic generation +=== Mimicking stateless traffic under stateful mode +[NOTE] +TRex now supports a true stateless traffic generation. +If you are looking for stateless traffic, please visit the following link: xref:trex_stateless.html[TRex Stateless Support] With this feature you can "repeat" flows and create stateless, *IXIA* like streams. After injecting the number of flows defined by `limit`, TRex repeats the same flows. If all template has a `limit` the CPS will be zero after a time as there are no new flows after the first iteration. 
-- cgit 1.2.3-korg From c15438ed1e4682768e500016e916e1e393f5be5f Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Tue, 23 Aug 2016 20:33:05 +0300 Subject: ubuntu 16 disable updates, update create config script info, loopback doc update --- images/loopback_example.png | Bin 0 -> 82924 bytes release_notes.asciidoc | 6 ++-- trex_book.asciidoc | 79 +++++++++++++++++++++++--------------------- 3 files changed, 45 insertions(+), 40 deletions(-) create mode 100755 images/loopback_example.png diff --git a/images/loopback_example.png b/images/loopback_example.png new file mode 100755 index 00000000..71cbb053 Binary files /dev/null and b/images/loopback_example.png differ diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 80c54803..a2537059 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -31,8 +31,10 @@ endif::backend-docbook[] [source,bash] ---- -$sudo ./dpdk_setup_ports.py -t # show the list of ports -$sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 -o /etc/trex_cfg.yaml # create optimum /etc/trex_cfg.yaml file +$sudo ./dpdk_setup_ports.py -t # show the list of ports +$sudo ./dpdk_setup_ports.py -l # return DPDK interfaces to Linux (if there is proper Linux driver) +$sudo ./dpdk_setup_ports.py -i # interactive creation of config file +$sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 -o /etc/trex_cfg.yaml # create optimum /etc/trex_cfg.yaml file ---- * Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. 
diff --git a/trex_book.asciidoc b/trex_book.asciidoc index b8de9f35..1df2dac3 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -256,7 +256,10 @@ ISO images for supported Linux releases can be downloaded from: | link:http://fedora-mirror01.rbc.ru/pub/fedora/linux/releases/21/Server/x86_64/iso/Fedora-Server-DVD-x86_64-21.iso[Fedora 21] | link:http://fedora-mirror01.rbc.ru/pub/fedora/linux/releases/21/Server/x86_64/iso/Fedora-Server-21-x86_64-CHECKSUM[Fedora 21 CHECKSUM] | link:http://old-releases.ubuntu.com/releases/14.04.1/ubuntu-14.04-desktop-amd64.iso[Ubuntu 14.04.1] - | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04 CHECKSUM] + | http://old-releases.ubuntu.com/releases/14.04.1/SHA256SUMS[Ubuntu 14.04* CHECKSUMs] +| link:http://releases.ubuntu.com/16.04.1/ubuntu-16.04.1-server-amd64.iso[Ubuntu 16.04.1] + | http://releases.ubuntu.com/16.04.1/SHA256SUMS[Ubuntu 16.04* CHECKSUMs] + |====================================== For Fedora downloads... @@ -285,6 +288,9 @@ xref:fedora21_example[Example of installing Fedora 21 Server] ===================================== * To use TRex, you should have sudo on the machine or the root password. * Upgrading the linux Kernel using `yum upgrade` requires building the TRex drivers. + * In Ubuntu 16, auto-updater is enabled by default. It's advised to turn it off as with update of Kernel need to compile again the DPDK .ko file. + +Command to remove it: + + > sudo apt-get remove unattended-upgrades ===================================== ==== Verify Intel NIC installation @@ -343,23 +349,16 @@ $wget --no-cache $WEB_URL/release/vX.XX.tar.gz #<1> === Running TRex for the first time in loopback -If you have a 10Gb/sec TRex (based on Intel 520-D2 NICs), you can verify that it works correctly by loopback on the ports. -You can install Intel SFP+ or Cisco SFP+, but you cannot connect ports that are on the same NIC to each other (it might not sync). 
-If you have only one NIC of 10gb/sec you cannot perform this test because the ports will not have a valid link. -Another option for loopback is to use link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[Cisco twinax copper cable]. - -//TBD: perhaps rephase, using a "Prerequisites" or "Required" heading. The requirement here would be: Two (2) 10gb/sec NICs -//[hh] it is not accurate beacuse with 1Gb/sec you can have this test - -.Correct loopback -image:images/loopback_right.png[title="Correct Loopback"] - -.Wrong loopback -image:images/loopback_wrong.png[title="Wrong Loopback"] +Before jumping to check the DUT, you could verify TRex and NICs working in loopback. + +For performance-wise, it's better to connect interfaces on the same NUMA (controlled by one physical processor) + +However, if you have a 10Gb/sec interfaces (based on Intel 520-D2 NICs), and you connect ports that are on the same NIC to each other with SFP+, it might not sync. + +We have checked many SFP+ (Intel/Cisco/SR/LR) and had link. + +If you are still facing this issue you could either try to connect interfaces of different NICs or use link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[Cisco twinax copper cable]. -If you have a 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC, you can do anything you like from the loopback perspective *but* first filter the management port - see xref:trex_config[TRex Configuration]. +.Loopback example +image:images/loopback_example.png[title="Loopback example"] -// above, clarify "you can do anything you like from the loopback perspective" +If you have a 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC, you can connect any port to any port from loopback perspective *but* first filter the management port - see xref:trex_config[TRex Configuration]. ==== Identify the ports @@ -440,48 +439,52 @@ and example ==== Script for creating config file -We provide a script to create configuration file by specifying as input desired interfaces. 
-Examples of using: +===== Interactive mode + +[source,bash] +---- +sudo ./dpdk_setup_ports.py -i +---- + +Will be printed table with all interfaces and related information. + +Then, user is asked to provide desired interfaces, MAC destinations etc. -* Get view of all ports as table: +===== Specifying input arguments from CLI + +Another option is to run script with all the arguments given directly from CLI. + +Run this command to see list of all interfaces and related information: [source,bash] ---- sudo ./dpdk_setup_ports.py -t ---- -* Create config file with given interfaces. You can use PCI addresses or Linux IF (eth1 etc.): +* In case of Loopback, no need to provide destination MACs. + +Will be assumed connection 0↔1, 2↔3 etc. + +Just run: [source,bash] ---- -sudo ./dpdk_setup_ports.py -c 13:00.0 eth3 --dump -o /etc/trex_cfg.yaml -### Config file generated by dpdk_setup_ports.py ### +sudo ./dpdk_setup_ports.py -c ... +---- -- port_limit: 2 - version: 2 - interfaces: ['13:00.0', '1b:00.0'] - port_info: - - dest_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xda] # MAC OF LOOPBACK TO IT'S DUAL INTERFACE - src_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xd0] - - dest_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xd0] # MAC OF LOOPBACK TO IT'S DUAL INTERFACE - src_mac: [0x00, 0x0c, 0x29, 0x2a, 0x99, 0xda] +* In case of router (or other next hop device), should be specified MACs of router interfaces as destination. - platform: - master_thread_id: 0 - latency_thread_id: 1 - dual_if: - - socket: 0 - threads: [2] +[source,bash] +---- +sudo ./dpdk_setup_ports.py -c ... --dest-macs ... ---- -.Arguments of dpdk_setup_ports.py script +* In case of Switch, it's NICs should be transparent to config: if DUT is router, use Router's MACs. If loopback, use TRex MACs. 
(See previous bullets) + +.Additional arguments to creating script (dpdk_setup_ports.py -c) [options="header",cols="2,5,3",width="100%"] |================= | Arg | Description | Example | -c | Create a configuration file by specified interfaces (PCI address or Linux names: eth1 etc.) | -c 03:00.1 eth1 eth4 84:00.0 | --dump | Dump created config to screen. | | -o | Output the config to this file. | -o /etc/trex_cfg.yaml -| --dest-macs | Destination MACs to be used in created yaml file per each interface. Without specifying the option, will be assumed loopback (0⇔1, 2⇔3 etc.) | --dest-macs 11:11:11:11:11:11 22:22:22:22:22:22 +| --dest-macs | Destination MACs to be used in created yaml file per each interface. Without specifying the option, will be assumed loopback (0↔1, 2↔3 etc.) | --dest-macs 11:11:11:11:11:11 22:22:22:22:22:22 | --ci | Cores include: White list of cores to use. Make sure there is enough for each NUMA. | --ci 0 2 4 5 6 | --ce | Cores exclude: Black list of cores to exclude. Make sure there will be enough for each NUMA. | --ci 10 11 12 | --no-ht | No HyperThreading: Use only one thread of each Core in created config yaml. 
| -- cgit 1.2.3-korg From 6c537c2b281004f7214dbb9cd3f3805678dd5390 Mon Sep 17 00:00:00 2001 From: imarom Date: Wed, 24 Aug 2016 06:34:49 +0300 Subject: Client clustering doc --- images/client_clustering_topology.png | Bin 0 -> 204458 bytes trex_book.asciidoc | 143 ++++++++++++++++++++++++++-------- 2 files changed, 109 insertions(+), 34 deletions(-) create mode 100644 images/client_clustering_topology.png diff --git a/images/client_clustering_topology.png b/images/client_clustering_topology.png new file mode 100644 index 00000000..cb235c7a Binary files /dev/null and b/images/client_clustering_topology.png differ diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 1df2dac3..8b4a2dd6 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -860,60 +860,135 @@ asr1k(config)#ipv6 route 5000::/64 3001::2 <5> PBR configuraion -=== Source MAC address mapping using a file +=== Client clustering configuration +Trex supports testing a complex topology by a feature called "client clustering". +This feature allows a more detailed clustering of clients. -Extends the source MAC address replacment capability. Enables mapping between IPv4->MAC using the `--mac` CLI switch. The file format is YAML. +Let's assume the following topology: -*Example:*:: -[source,bash] ----- -$sudo ./t-rex-64 -f cap2/sfr_delay_10_1g.yaml -c 4 -l 100 -d 100000 -m 30 --mac cap2/test_example.yaml ----- +image:images/client_clustering_topology.png[title="Client Clustering"] -*MAC file structure:*:: -[source,python] ----- -- items : - - ip : "16.0.0.1" - mac : [0x16,0x1,0x4,0x5,0x6,0x7] - - ip : "16.0.0.2" - mac : [0x16,0x2,0x0,0x1,0x0,0x0] ----- +We would like to configure two clusters and direct traffic to them. -*Limitations:*:: +Using a config file, you can instruct TRex to generate clients +with specific configuration per cluster. -. It is assumed that most clients have a MAC address. At least 90% of IPs should have MAC address mapping. 
+A cluster configuration includes: -=== Destination MAC address spreading +* IP start range +* IP end range +* Initator side configuration +* Responder side configuration -anchor:mac_spread[] +[NOTE] +It is important to state that this is *complimentry* to the client generator +configured per profile - it only defines how the generator will be clustered. -Using this option, one can send traffic to few destination devices. In normal mode, all packets are sent to the port destination MAC address. -To enable this option, add `--mac-spread` to the command line. +Let's take a look at an example: -Example: +We have a profile which defines a client generator: [source,bash] ---- -$sudo ./t-rex-64 -f cap2/http_simple.yaml -d 1000 -m 1000 -c 4 -l 100 --mac-spread 2 +$more cap2/dns.yaml +- duration : 10.0 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 1 + udp_aging : 1 + mac : [0x00,0x00,0x00,0x01,0x00,0x00] + cap_info : + - name: cap2/dns.pcap + cps : 1.0 + ipg : 10000 + rtt : 10000 + w : 1 ---- -In this example, TRex sends to port destination MAC and port destination MAC +1. Using a switch, you can connect TRex to multiple devices under test (DUTs). -All of the DUTs return the traffic only to the correct port source address. -// above, i removed "should" - verify accuracy + +We would like to create two clusters of 4 devices each. +We would also like to divide *80%* of the traffic to the upper cluster +and *20%* to the lower cluster. 
+ +We create a cluster configuration file in YAML: [source,bash] ---- - switch A switch A - | | - | D0+0 -> DUT0 <- D1+0 | -TRex(0) -| |-TRex(1) - | | - | D0+1 -> DUT1 <- D1+1 | - | +# +# Client configuration example file +# The file must contain the following fields +# +# 'vlan' - is the entire configuration under VLAN +# if so, each client group must include vlan +# configuration +# +# 'groups' - each client group must contain a range of IP +# and initiator and responder maps +# 'count' represents the number of MAC devices +# on the group. +# +# initiator and responder can contain 'vlan', 'src_mac', 'dst_mac' +# + +# each group contains a double way VLAN configuration +vlan: true <1> + +groups: + +- ip_start : 16.0.0.1 <2> + ip_end : 16.0.0.204 + initiator : <3> + vlan : 100 + dst_mac : "00:00:00:01:00:00" + responder : <4> + vlan : 200 + dst_mac : "00:00:00:01:00:00" + + count : 4 + +- ip_start : 16.0.0.205 + ip_end : 16.0.0.255 + initiator : + vlan : 101 + dst_mac : "01:00:00:00:01:01" + + responder: + vlan : 201 + dst_mac : "01:00:00:00:02:01" + + count : 4 ---- +The above configuration will divide the generator range of 255 clients to two clusters, +where each has 4 devices and VLAN on both ways. + +MACs will be allocated incrementaly with a wrap around. + +e.g. + +* 16.0.0.1 --> 00:00:00:01:00:00 +* 16.0.0.2 --> 00:00:00:01:00:01 +* 16.0.0.3 --> 00:00:00:01:00:03 +* 16.0.0.4 --> 00:00:00:01:00:04 +* 16.0.0.5 --> 00:00:00:01:00:00 +* 16.0.0.6 --> 00:00:00:01:00:01 + +and so on. 
+ +*Usage:* + +[source,bash] +---- +sudo ./t-rex-64 -f cap2/dns.yaml --client_cfg my_cfg.yaml -c 4 -d 100 +---- === NAT support -- cgit 1.2.3-korg From 435f94ee118cdfe9715b4580218cf0cd8026614a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Wed, 24 Aug 2016 16:31:23 +0300 Subject: v2.08 --- release_notes.asciidoc | 4 ++-- trex_stateless.asciidoc | 31 ++++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index a2537059..7f0d64d6 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -26,7 +26,7 @@ endif::backend-docbook[] == Release 2.08 == * Scapy JSON-RPC server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] -* Client.start Python API supports Core mask - significantly improve the Stateless performance. link:cp_stl_docs/_modules/trex_stl_lib/trex_stl_client.html#STLClient.start[start API], *TBD*- Itay to update the pointer here +* Client.start Python API supports Core mask - significantly improve the Stateless performance. link:cp_stl_docs/_modules/trex_stl_lib/trex_stl_client.html#STLClient.start[start API], and link:trex_stateless.html#_core_masking_per_interface[core_masking] * Upgrade the ./dpdk_setup_ports.py script. It simplifies the way to create first time *optimized* config file (/etc/trex_cfg.yaml). More info at the manual: link:trex_manual.html#_script_for_creating_config_file[Script for creating config file] [source,bash] @@ -37,7 +37,7 @@ $sudo ./dpdk_setup_ports.py -i # interactive creation of config file $sudo ./dpdk_setup_ports.py -c 03:00.0 03:00.1 -o /etc/trex_cfg.yaml # create optimum /etc/trex_cfg.yaml file ---- -* Basic Cisco VIC functionality works now. Advanced Stateless/Stateful functionality is still not supported. +* Preliminary Cisco VIC support. Advanced Stateless/Stateful functionality is still not supported. 
* Enforce latest firmware for XL710/X710 (5.04) * Add a way to stop/close NICS at TRex termination (link would be down) `-close-at-end` * IPv6 XL710 ICMP packets are supported now diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index c55b6520..8b431604 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -1767,6 +1767,8 @@ The following example demonstrates varies the packet size randomly, as follows: ==== Tutorial: Field Engine, Significantly improve performance +anchor:trex_cache_mbuf[] + The following example demonstrates a way to significantly improve Field Engine performance in case it is needed. Field Engine has a cost of CPU instructions and CPU memory bandwidth. There is a way to significantly improve performance by caching the packets and run the Field Engine offline(before sending the packets). @@ -3680,7 +3682,10 @@ some impact on other areas and in general, might sacrafice one or more propertie that requires the user to explicitly give up on those. 
==== Caching MBUFs -** + + +see xref:trex_cache_mbuf[here] + ==== Core masking per interface By default, TRex will regard any TX command with a **greedy approach**: @@ -3736,6 +3741,22 @@ trex> ---- + +.API example to PIN cores +[source,python] +---- + c.start(ports = [port_a, port_b], mult = rate,core_mask=STLClient.CORE_MASK_PIN) <1> +---- +<1> core_mask = STLClient.CORE_MASK_PIN + +.API example to MASK cores +[source,python] +---- + c.start(ports = [port_a, port_b], mult = rate, core_mask=[0x1,0x2])<1> +---- +<1> DP Core 0 (mask==1) is assign to port 1 and DP core 1 (mask==2) is for port 2 + + [source,bash] ---- @@ -3835,6 +3856,14 @@ Starting traffic on port(s) [0, 1]: [SUCCESS] trex> ---- +[source,python] +---- + c.start(ports = [port_a, port_b], mult = rate,core_mask=[0x0xe,0x1]) <1> +---- +<1> mask of cores per port + + + The following output is received on the TUI CPU util window: [source,bash] -- cgit 1.2.3-korg From 0df4b72cf67c310965b33ba4cb9a5ee3ffc85ff2 Mon Sep 17 00:00:00 2001 From: itraviv Date: Wed, 24 Aug 2016 17:44:49 +0300 Subject: fixed minor typos and details --- trex_scapy_rpc_server.asciidoc | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index 5e181ece..ed752b68 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -42,7 +42,7 @@ Error codes are given according to this table: [also follows the JSON-RPC spec, [options="header",cols="^1,^h,3a"] |================= | Error Code | Message | Meaning -| -32700 | Parse Error | Invalid JSON was received by the server. An error occurred on the server while parsing the JSON text. +| -32700 | Parse Error | Invalid JSON was received by the server. An error occurred on the server while parsing the JSON input. | -32600 | Invalid Request | The JSON sent is not a valid Request object. 
| -32601 | Method not found | The method does not exist / is not available | -32603 | Invalid params | Invalid method parameter(s) @@ -57,7 +57,7 @@ Error codes are given according to this table: [also follows the JSON-RPC spec, == Data Bases and Data Structures used in Scapy Server === Protocol Field Description -This data sturcture contains the name of the field, its type and the default value assigned. + +This data structure contains the name of the field, its type and the default value assigned. + + Has the following structure: + @@ -75,7 +75,7 @@ this is the 'dst' field for the 'Ether' protocol === Offsets Dictionary and Offset Entry -==== The *"Offset Entry"* data sturcture contains the offset of a field within the *layer*, and its size. + +==== The *"Offset Entry"* data structure contains the offset of a field within the *layer*, and it's size. + (both measured in Bytes) + + @@ -87,7 +87,7 @@ Has the following structure: + *Example:* + This is the 'src' field for the 'IP' protocol: + -the offset within the layer is 16 bytes, and the size of the field is 4 bytes (as defined in the IP spec) +The offset within the layer is 16 bytes, and the size of the field is 4 bytes (as defined in the IP spec) [source,bash] ---- 'dst': [16, 4] @@ -124,15 +124,15 @@ This is the Offsets Dictionary for the IP layer: + ---- -Each layer has a 'global_offset' key. this key represents the *offset of the layer within the packet*. + -In the example above, the IP layer starts at offset 0, and the field src is at offset 12 within the packet. + +Each layer has a 'global_offset' key. This key represents the *offset of the layer within the packet*. + +In the example above, the IP layer starts at offset 0, and the field 'src' is at offset 12 within the packet. 
+ In the general case, a field's offset within the *packet* is calculated this way: + 'global_offset' + 'field_offset' === Protocol Dictionary The protocol dictionary contains the names for all supported protocols and layers for building packets. + -Each entry in this data base has the following format: + +Each entry in this database has the following format: + 'Protocol Name' : 'Protocol Field Description' + + @@ -165,7 +165,7 @@ Each entry in this data base has the following format: + === Fields Dictionary The fields dictionary contains mapping between a field's name and its regular expression, + -Which has the following structure: + +which has the following structure: + (field name, field RegEx) + Example: this is the Regex for the 'MACField' protocol @@ -186,14 +186,15 @@ The dictionary maintains its regular structure: ---- == RPC Commands -The following RPC commands are supported. please refer to data bases section for elaboration on given data bases +The following RPC commands are supported. Please refer to databases section for elaboration for each database. === Supported Methods * *Name* - supported_methods * *Description* - returns the list of all supported methods by Scapy Server and their parameters -* *Parameters* - a single empty string ('') will return *ALL* supported methods. + +* *Parameters* - the parameter ('all') will return *ALL* supported methods. + other string delivered as parameter will return True/False if the string matches a supported method name -* *Result* - according to input: empty string will return list of supported methods, otherwise will return True/False as mentioned +* *Result* - according to input: 'all' string will return list of supported methods, otherwise will return True/False as mentioned. + + The returned dictionary describes for each method it's number of parameters followed by a list of their names. *Example:* @@ -204,7 +205,7 @@ The following RPC commands are supported. 
please refer to data bases section for "jsonrpc": "2.0", "id": "1", "method": "supported_methods", - "params": [""] + "params": ["all"] } 'Result': @@ -282,10 +283,10 @@ The following RPC commands are supported. please refer to data bases section for ---- -=== Check if DataBase is updated +=== Check if Database is updated * *Name* - 'check_update' * *Description* - checks if both protocol database and fields database are up to date according to md5 comparison -* *Paramters* - md5 of database, md5 of fields +* *Parameters* - md5 of database, md5 of fields * *Result* - upon failure: error code -32098 (see link:trex_scapy_rpc_server.html#_error_codes[RPC server error codes]) + followed by a message: "Fields DB is not up to date" or "Protocol DB is not up to date" + upon success: return 'true' as result (see below) + -- cgit 1.2.3-korg From eca2ff16cf84612b976d0f4932574877db728249 Mon Sep 17 00:00:00 2001 From: itraviv Date: Wed, 24 Aug 2016 09:53:43 +0300 Subject: removed scapy server image and fixed its path --- images/Scapy_JSON_rpc_server.png | Bin 98993 -> 0 bytes trex_scapy_rpc_server.asciidoc | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100755 images/Scapy_JSON_rpc_server.png diff --git a/images/Scapy_JSON_rpc_server.png b/images/Scapy_JSON_rpc_server.png deleted file mode 100755 index 6e62e792..00000000 Binary files a/images/Scapy_JSON_rpc_server.png and /dev/null differ diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index ed752b68..e13c63d6 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -33,7 +33,7 @@ The server operates on a Request-Response basis *over ZMQ*, and does not support Read more about ZMQ link:http://zguide.zeromq.org/page:all[here] -image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/Scapy_JSON_rpc_server.png"] +image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, 
link="images/scapy_json_rpc_server.png"] === Error Codes -- cgit 1.2.3-korg From d3117cb9b36bdeaed940a1aed9d370fdd4c84dde Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Thu, 25 Aug 2016 09:51:13 +0300 Subject: script of config generation: rephrase + distinguish l2/l3 switch --- trex_book.asciidoc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 8b4a2dd6..a69a73b1 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -459,7 +459,7 @@ Run this command to see list of all interfaces and related information: sudo ./dpdk_setup_ports.py -t ---- -* In case of Loopback, no need to provide destination MACs. + +* In case of *Loopback* and/or only *L1-L2 Switches* on the way, no need to provide destination MACs. + Will be assumed connection 0↔1, 2↔3 etc. + Just run: @@ -468,15 +468,13 @@ Just run: sudo ./dpdk_setup_ports.py -c ... ---- -* In case of router (or other next hop device), should be specified MACs of router interfaces as destination. +* In case of *Router* (or other next hop device, such as *L3 Switch*), should be specified MACs of router interfaces as destination. [source,bash] ---- sudo ./dpdk_setup_ports.py -c ... --dest-macs ... ---- -* In case of Switch, it's NICs should be transparent to config: if DUT is router, use Router's MACs. If loopback, use TRex MACs. (See previous bullets) - .Additional arguments to creating script (dpdk_setup_ports.py -c) [options="header",cols="2,5,3",width="100%"] |================= -- cgit 1.2.3-korg From 7af70d850f003d76cb57ce7386187dd853e44059 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Wed, 31 Aug 2016 03:18:47 +0300 Subject: fix typo in static routing IP. 
fix search path of Sphinx --- trex_config.asciidoc | 28 ++++++++++++++-------------- ws_main.py | 8 +++++--- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/trex_config.asciidoc b/trex_config.asciidoc index c08cac4d..42f21b62 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -116,7 +116,7 @@ image::images/TrexConfig.png[title="TRex/Router setup"] == TRex configuration * TRex act as both client and server side -* TRex port mac addrees should configure correctly, so packet generated from port 1 will get to 2 and vice-versa +* TRex port mac address should configure correctly, so packet generated from port 1 will get to 2 and vice-versa * To use the config file you can add this switch `--cfg [file]` * Or edit the configuration file in `/etc/trex_cfg.yaml` @@ -130,14 +130,14 @@ image::images/TrexConfig.png[title="TRex/Router setup"] src_mac : [0x0,0x0,0x0,0x4,0x0,0x00] <4> ---- <1> Correspond to TRex port 0 - should be Router TenG 0/0/0 mac-address -<2> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees +<2> Should be distinc mac-address, router should be configure to sent to this mac-address <3> Correspond to TRex port 1 - should be Router TenG 0/0/1 mac-address -<4> Should be distinc mac-addrees, router should be configure to sent to this mac-addrees +<4> Should be distinc mac-address, router should be configure to sent to this mac-address == Router configuration PBR part 1 -* Router moves packets from port 0->1 and 1->0 without looking into IP addrees. +* Router moves packets from port 0->1 and 1->0 without looking into IP address. * TenG 0/0/0 <-> TenG 0/0/1 @@ -161,11 +161,11 @@ interface TenGigabitEthernet0/0/1 load-interval 30 ! 
---- -<1> Configure mac-addrees to match TRex destination port-0 +<1> Configure mac-address to match TRex destination port-0 <2> Set MTU -<3> Set an ip addrees ( routing can't work without this) +<3> Set an ip address ( routing can't work without this) <4> Configure PBR policy - see next slide -<5> Configure mac-addrees to match TRex destination port-1 +<5> Configure mac-address to match TRex destination port-1 == Router configuration PBR part 2 @@ -186,16 +186,16 @@ route-map p2_to_p1 permit 10 == Router configuration PBR part 3 -* What about destination mac-address it should be TRex source mac-addrees? -* The folowing configuration addrees it +* What about destination mac-address it should be TRex source mac-address? +* The folowing configuration address it [source,python] ---- arp 11.11.11.12 0000.0002.0000 ARPA <1> arp 12.11.11.12 0000.0004.0000 ARPA <2> ---- -<1> Destination mac-addrees of packets sent from If 0/0/0 is matched to TRex source mac-address port-0 -<2> Destination mac-addrees of packets sent from If 0/0/1 is matched to TRex source mac-address port-1 +<1> Destination mac-address of packets sent from If 0/0/0 is matched to TRex source mac-address port-0 +<2> Destination mac-address of packets sent from If 0/0/1 is matched to TRex source mac-address port-1 == Static-route configuration - TRex @@ -233,8 +233,8 @@ interface TenGigabitEthernet0/0/1 mtu 4000 ip address 22.11.11.11 255.255.255.0 ! 
-ip route 16.0.0.0 255.0.0.0 22.11.11.12 <1> -ip route 48.0.0.0 255.0.0.0 11.11.11.12 <2> +ip route 16.0.0.0 255.0.0.0 11.11.11.12 <1> +ip route 48.0.0.0 255.0.0.0 22.11.11.12 <2> ---- <1> Match the range of TRex YAML ( client side 0/0/0 ) <2> Match the range of TRex YAML ( server side 0/0/1) @@ -294,7 +294,7 @@ csi-mcp-asr1k-40(config)#ipv6 route 4000::/64 2001::2 csi-mcp-asr1k-40(config)#ipv6 route 5000::/64 3001::2 ---- <1> Enable ipv6 -<2> Add ipv6 addrees +<2> Add ipv6 address <3> Add pbr <4> Enable ipv6 routing <5> Mac-addr setting should be like TRex diff --git a/ws_main.py b/ws_main.py index 2a534dfc..8b2f99b6 100755 --- a/ws_main.py +++ b/ws_main.py @@ -248,7 +248,7 @@ def options(opt): def configure(conf): conf.find_program('asciidoc', path_list='/usr/bin/', var='ASCIIDOC') - conf.find_program('sphinx-build', path_list='/usr/local/bin/', var='SPHINX') + conf.find_program('sphinx-build', path_list='~/.local/bin /usr/local/bin/ /usr/bin', var='SPHINX') pass; def convert_to_pdf(task): @@ -797,7 +797,8 @@ def build_cp_docs (task): if not trex_core_git_path: # there exists a default directory or the desired ENV variable. return 1 trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + build_doc_cmd = shlex.split("{sph} -W -b {bld} {src} {dst}".format( + sph= task.env['SPHINX'], bld= "html", src= ".", dst= out_dir) @@ -811,7 +812,8 @@ def build_stl_cp_docs (task): if not trex_core_git_path: # there exists a default directory or the desired ENV variable. 
return 1 trex_core_docs_path = os.path.abspath(os.path.join(trex_core_git_path, 'scripts', 'automation', 'trex_control_plane', 'doc_stl')) - build_doc_cmd = shlex.split("/usr/local/bin/sphinx-build -W -b {bld} {src} {dst}".format( + build_doc_cmd = shlex.split("{sph} -W -b {bld} {src} {dst}".format( + sph= task.env['SPHINX'], bld= "html", src= ".", dst= out_dir) -- cgit 1.2.3-korg From 20007b4d0903d290705152b22ba590e0c1eaf90e Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 9 Sep 2016 15:18:07 +0300 Subject: update FAQ --- trex_book.asciidoc | 16 +++++----- trex_faq.asciidoc | 52 +++++++++++++++++++++++++++++++++ trex_stateless.asciidoc | 6 ++++ visio_drawings/stl_streams_example.vsd | Bin 376832 -> 484352 bytes 4 files changed, 67 insertions(+), 7 deletions(-) diff --git a/trex_book.asciidoc b/trex_book.asciidoc index a69a73b1..79941f87 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -119,6 +119,7 @@ TRex curretly works on x86 architecture and can operate well on Cisco UCS hardwa | CPU Configuration | 2-Socket CPU configurations (also works with 1 CPU). | Memory | 2x4 banks for each CPU. Total of 32GB in 8 banks. | RAID | No RAID. +| Riser 1/2 | both left and right should support x16 PCIe. Right (Riser1) should be from option A x16 and Left (Riser2) should be x16. need to order both |================= .Supported NICs @@ -133,18 +134,19 @@ TRex curretly works on x86 architecture and can operate well on Cisco UCS hardwa | VMXNET / + VMXNET3 (see notes) | VMware paravirtualized | Connect using VMware vSwitch | E1000 | paravirtualized | VMware/KVM/VirtualBox +| Virtio | paravirtualized | KVM |================= // in table above, is it correct to list "paravirtualized" as chipset? Also, what is QSFP28? It does not appear on the lined URL. Clarify: is Intel X710 the preferred NIC? 
-.X710 NIC base SFP+ support -[options="header",cols="1,1,1",width="70%"] +.SFP+ support +[options="header",cols="2,1,1,1",width="70%"] |================= -| link:https://en.wikipedia.org/wiki/Small_form-factor_pluggable_transceiver[SFP+] | Intel Ethernet Converged X710-DAX | Silicom link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[PE310G4i71L] (Open optic) -| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-SR] | Does not work | [green]*works* -| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-LR] | Does not work | [green]*works* -| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-H10GB-CU1M]| [green]*works* | [green]*works* -| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-AOC1M] | [green]*works* | [green]*works* +| link:https://en.wikipedia.org/wiki/Small_form-factor_pluggable_transceiver[SFP+] | Intel Ethernet Converged X710-DAX | Silicom link:http://www.silicom-usa.com/PE310G4i71L_Quad_Port_Fiber_SFP+_10_Gigabit_Ethernet_PCI_Express_Server_Adapter_49[PE310G4i71L] (Open optic) | 82599EB 10-Gigabit +| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-SR] | Does not work | [green]*works* | [green]*works* +| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-LR] | Does not work | [green]*works* | [green]*works* +| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-H10GB-CU1M]| [green]*works* | [green]*works* | [green]*works* 
+| link:http://www.cisco.com/c/en/us/products/collateral/interfaces-modules/transceiver-modules/data_sheet_c78-455693.html[Cisco SFP-10G-AOC1M] | [green]*works* | [green]*works* | [green]*works* |================= [NOTE] diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index f1f0859f..2a6a742e 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -261,6 +261,8 @@ The number of threads is equal (dual_ports) * (-c value) ==== Incoming frames are from type SCTP why? Default latency packets are SCTP, you can remove `-l 1000` or change it to ICMP see manual for more info +==== Is there a configuration guide to Linux as a router (static ARP)? +have a look link:https://groups.google.com/forum/#!topic/trex-tgn/YQcQtA8A8hA[linux as a router] === Stateless @@ -406,4 +408,54 @@ See link:https://github.com/zverevalexei/trex-http-proxy[here] an example for si link:https://gerrit.fd.io/r/gitweb?p=csit.git;a=tree;f=resources;hb=HEAD[here] +==== Are you recommending TRex HLTAPI ? +TRex has minimal and basic support for HLTAPI. For simple use cases (without latency and per stream statistic) it probably will work. For advance use cases there is no replacement for native API that has full control and in some cases it is more simple/implicit to use. + +==== Can I test Qos using TRex ? +Yes. using Field Engine you can build streams with different TOS and get statistic/latency/jitter per stream + +==== Does latency stream support full line rate? +No. latency streams are handled by rx software and there is only one core to handle the traffic. +To workaround this you could create one stream in lower speed for latency (e.g. PPS=1K) and another one of the same type without latency. The latency stream will sample the DUT queues. 
For example, if the required latency resolution is 10usec there is no need to send a latency stream in speed higher than 100KPPS- usually queues are built over time, so it is not possible that one packet will have a latency and another packet in the same path will not have the same latency. The none latency stream could be in full line rate (e.g. 100MPPS) + +.Example +[source,Python] +-------- + stream = [STLStream(packet = pkt, + mode = STLTXCont(pps=1)), <1> + + + # latency stream + STLStream(packet = pkt, + mode = STLTXCont(pps=1000), <2> + flow_stats = STLFlowLatencyStats(pg_id = 12+port_id)) +-------- +<1> non latency stream will be amplified +<2> latency stream, the speed would be constant 1KPPS + + +==== Why Latency stream has a constant rate of 1PPS? +When you have the following example + +[source,Python] +-------- + stream = [STLStream(packet = pkt, + mode = STLTXCont(pps=1)), <1> + + + # latency stream + STLStream(packet = STLPktBuilder(pkt = base_pkt/pad_latency), + mode = STLTXCont(pps=1000), <2> + flow_stats = STLFlowLatencyStats(pg_id = 12+port_id)) +-------- +<1> non latency stream +<2> latency stream + + +and you give to start API a multiplier of 10KPPS the latency stream (#2) will keep the rate of 1000 PPS and won't be amplified. + + +==== What are the supported routing protocols ? +For now non. beacuse there is no tighe + diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 8b431604..4296b430 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -2901,6 +2901,12 @@ in the "Per stream statistics" section is also available. the rate of a latency stream, you need to manually edit your profile file. Usually this is not necessary, since normally you stress the system using non latency stream, and (in parallel) measure latency using constant rate latency stream. +[IMPORTANT] +===================================== +Latency streams are not supported in full line rate like normal streams. 
Both from, transmit and receive point of view. This is a design consideration to keep the latency measurement accurate and preserve CPU resource. One of the reason for doing so that in most cases it is enough to have a latency stream not in full rate. For example, if the required latency resolution is 10usec there is no need to send a latency stream in speed higher than 100KPPS- usually queues are built over time, so it is not possible that one packet will have a latency and another packet in the same path will not have the same latency. The none latency stream could be in full line rate (e.g. 100MPPS) to load the DUT while the low speed latency stream will measure this path latency. +Don't expect the total latency streams rate to be higher than 1-5MPPS +===================================== + Two examples follow. One using the console and the other using the Python API. *Console*:: diff --git a/visio_drawings/stl_streams_example.vsd b/visio_drawings/stl_streams_example.vsd index 9029b0a1..423781b8 100644 Binary files a/visio_drawings/stl_streams_example.vsd and b/visio_drawings/stl_streams_example.vsd differ -- cgit 1.2.3-korg From a83bac2d09cac13814f17809195ff88c0213c463 Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Wed, 21 Sep 2016 14:57:11 +0300 Subject: New section for stateless latency questions + small fixes --- trex_faq.asciidoc | 82 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 2a6a742e..793f3372 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -48,7 +48,7 @@ TRex is fast realistic open source traffic generation tool, running on standard [NOTE] ===================================== -Features terminating TCP can't be tested yet. +Features terminating TCP can not be tested yet. ===================================== ==== Who uses TRex? @@ -57,8 +57,8 @@ Cisco systems, Intel, Imperva, Melanox, Vasona networks and much more. 
==== What are the Stateful and Stateless modes of operation? -'Stateful' mode is meant for testing networking gear which save state per flow (5 tuple). Usually, this is done by injecting pre recorded cap files on pairs of interfaces of the device under test, changing src/dst IP/port. -'Stateless' mode is meant to test networking gear, not saving state per flow (doing the decision on per packet bases). This is usually done by injecting customed packet streams to the device under test. +``Stateful'' mode is meant for testing networking gear which save state per flow (5 tuple). Usually, this is done by injecting pre recorded cap files on pairs of interfaces of the device under test, changing src/dst IP/port. +``Stateless'' mode is meant to test networking gear, not saving state per flow (doing the decision on per packet bases). This is usually done by injecting customed packet streams to the device under test. See link:trex_stateless.html#_stateful_vs_stateless[here] for more details. ==== Can TRex run on an hypervisor with virtual NICS? @@ -74,7 +74,7 @@ Limitations: ==== Why not all DPDK supported NICs supported by TRex? 1. We are using specific NIC features. Not all the NICs have the capabilities we need. -2. We have regression tests in our lab for each recommended NIC. We don't claim to support NICs we don't have in our lab. +2. We have regression tests in our lab for each recommended NIC. We do not claim to support NICs we do not have in our lab. ==== Is Cisco VIC supported? No. Currently its DPDK driver does not support the capabilities needed to run TRex. @@ -127,7 +127,7 @@ You have several ways you can help: + ==== What is the release process? How do I know when a new release is available? It is a continuous integration. The latest internal version is under 24/7 regression on few setups in our lab. Once we have enough content we release it to GitHub (Usually every few weeks). 
-We don't send an email for every new release, as it could be too frequent for some people. We announce big feature releases on the mailing list. You can always check the GitHub of course. +We do not send an email for every new release, as it could be too frequent for some people. We announce big feature releases on the mailing list. You can always check the GitHub of course. === Startup and Installation @@ -197,7 +197,7 @@ Then, you can find some basic examples link:trex_manual.html#_trex_command_line[ ==== TRex is connected to a switch and we observe many dropped packets at TRex startup. A switch might be configured with spanning tree enabled. TRex initializes the port at startup, making the spanning tree drop the packets. Disabling spanning tree can help. On Cisco nexus, you can do that using `spanning-tree port type edge` -This issue would be fixed when we consolidate 'Stateful' and 'Stateless' RPC. +This issue would be fixed when we consolidate ``Stateful'' and ``Stateless'' RPC. ==== I can not see RX packets TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC address. From Stateless mode, you can change the port mode to promiscuous. + @@ -254,12 +254,12 @@ This example support 10M flows <1> 10M flows -==== ERROR The number of ips should be at least number of threads +==== I am getting and error: The number of ips should be at least number of threads The range of clients and servers should be at least the number of threads. -The number of threads is equal (dual_ports) * (-c value) +The number of threads is equal to (number of port pairs) * (-c value) -==== Incoming frames are from type SCTP why? -Default latency packets are SCTP, you can remove `-l 1000` or change it to ICMP see manual for more info +==== Incoming frames are of type SCTP. Why? +Default latency packets are SCTP, you can remove `-l 1000` or change it to ICMP see manual for more info. 
==== Is there a configuration guide to Linux as a router (static ARP)? have a look link:https://groups.google.com/forum/#!topic/trex-tgn/YQcQtA8A8hA[linux as a router] @@ -299,18 +299,18 @@ OSError: /lib64/libc.so.6: version `GLIBC_2.14' not found (required by /home/shi Yes. Multiple TRex clients can connect to the same TRex server. -==== Can I create a corrupted packets? +==== Can I create corrupted packets? Yes. You can build any packet you like using Scapy. However, there is no way to create corrupted L1 fields (Like Ethernet FCS), since these are usually handled by the NIC hardware. -==== Why the performance is low? +==== Why is the performance low? What would reduce the performance: 1. Many concurent streams. 2. Complex field engine program. -Adding 'cache' directive can improve the performance. See link:trex_stateless.html#_tutorial_field_engine_significantly_improve_performance[here] +Adding ``cache'' directive can improve the performance. See link:trex_stateless.html#_tutorial_field_engine_significantly_improve_performance[here] and try this: @@ -343,7 +343,7 @@ $start -f stl/udp_1pkt_src_ip_split.py -m 100% See example link:trex_stateless.html#_tutorial_field_engine_many_clients_with_arp[here] -==== How do I create a deterministic random stream variable +==== How do I create deterministic random stream variable? use `random_seed` per stream @@ -356,30 +356,13 @@ use `random_seed` per stream ==== Can I have a synconization betwean different stream variables? -No. each stream has it own, seperate field engine program +No. each stream has its own, seperate field engine program. ==== Is there a plan to have LUAJit as a field engine program? It is a great idea to add it, we are looking for someone to contribute this support. - -==== Streams with latency enabled do not get amplified by multiplier, why? 
- -Reason for this (besides being a CPU constrained feature) is that most of the time, the use case is that you load the DUT using some traffic streams, and check latency -using different streams. The latency stream is kind of 'testing probe' which you want to keep at constant rate, while playing with the rate of your other (loading) streams. -So, you can use the multiplier to amplify your main traffic, without changing your 'testing probe'. -If you do want to amplify latency streams, you can do this using 'tunables'. -You can add in the Python profile a 'tunable' which will specify the latency stream rate and you can provide it to the 'start' command in the console or in the API. -Tunables can be added through the console using 'start ... -t latency_rate=XXXXX' -or using the Python API directly (for automation): -STLProfile.load_py(..., latency_rate = XXXXX) -You can see example for defining and using tunables link:trex_stateless.html#_tutorial_advanced_traffic_profile[here]. - -==== Latency and statistic per stream is not supported for all types of packets. - -Correct. We use NIC capabilities for counting the packets or directing them to be handled by software. Each NIC has its own capabilities. Look link:trex_stateless.html#_tutorial_per_stream_statistics[here] and link:/trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors[here] for details. - ==== Java API instead of Python API Q:: I want to use the Python API via Java (with Jython), apparently, I can not import Scapy modules with Jython. @@ -409,12 +392,16 @@ link:https://gerrit.fd.io/r/gitweb?p=csit.git;a=tree;f=resources;hb=HEAD[here] ==== Are you recommending TRex HLTAPI ? -TRex has minimal and basic support for HLTAPI. For simple use cases (without latency and per stream statistic) it probably will work. For advance use cases there is no replacement for native API that has full control and in some cases it is more simple/implicit to use. +TRex has minimal and basic support for HLTAPI. 
For simple use cases (without latency and per stream statistic) it will probably work. For advanced use cases, there is no replacement for native API that has full control and in most cases is simpler to use. ==== Can I test Qos using TRex ? -Yes. using Field Engine you can build streams with different TOS and get statistic/latency/jitter per stream +Yes. Using Field Engine you can build streams with different TOS and get statistic/latency/jitter per stream + +==== What are the supported routing protocols TRex can emulate? +For now, none. You can connect your router to a switch with TRex and a machine running routem. Then, inject routes using routem, and other traffic using TRex. -==== Does latency stream support full line rate? +==== Latency and per stream statistics +===== Does latency stream support full line rate? No. latency streams are handled by rx software and there is only one core to handle the traffic. To workaround this you could create one stream in lower speed for latency (e.g. PPS=1K) and another one of the same type without latency. The latency stream will sample the DUT queues. For example, if the required latency resolution is 10usec there is no need to send a latency stream in speed higher than 100KPPS- usually queues are built over time, so it is not possible that one packet will have a latency and another packet in the same path will not have the same latency. The none latency stream could be in full line rate (e.g. 100MPPS) @@ -434,8 +421,12 @@ To workaround this you could create one stream in lower speed for latency (e.g. <2> latency stream, the speed would be constant 1KPPS -==== Why Latency stream has a constant rate of 1PPS? -When you have the following example +===== Latency stream has constant rate of 1PPS, and is not getting amplified by multiplier. Why? 
+Reason for this (besides being a CPU constrained feature) is that most of the time, the use case is that you load the DUT using some traffic streams, and check latency +using different streams. The latency stream is kind of ``testing probe'' which you want to keep at constant rate, while playing with the rate of your other (loading) streams. +So, you can use the multiplier to amplify your main traffic, without changing your ``testing probe''. + +When you have the following example: [source,Python] -------- @@ -452,10 +443,21 @@ When you have the following example <2> latency stream -and you give to start API a multiplier of 10KPPS the latency stream (#2) will keep the rate of 1000 PPS and won't be amplified. +If you speicify a multiplier of 10KPPS in start API, the latency stream (#2) will keep the rate of 1000 PPS and will not be amplified. + +If you do want to amplify latency streams, you can do this using ``tunables''. +You can add in the Python profile a ``tunable'' which will specify the latency stream rate and you can provide it to the ``start'' command in the console or in the API. +Tunables can be added through the console using ``start ... -t latency_rate=XXXXX'' +or using the Python API directly (for automation): +STLProfile.load_py(..., latency_rate = XXXXX) +You can see example for defining and using tunables link:trex_stateless.html#_tutorial_advanced_traffic_profile[here]. + + +===== Latency and per stream statistics are not supported for all packet types. + +Correct. We use NIC capabilities for counting the packets or directing them to be handled by software. Each NIC has its own capabilities. Look link:trex_stateless.html#_tutorial_per_stream_statistics[here] for per stream statistics and link:trex_stateless.html#_tutorial_per_stream_latency_jitter_packet_errors[here] for latency details. + -==== What are the supported routing protocols ? -For now non. 
beacuse there is no tighe -- cgit 1.2.3-korg From ea619323fd2e4992212c7f9e8bbc5c43badd5c62 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 26 Sep 2016 15:44:03 +0300 Subject: fix split dox --- trex_faq.asciidoc | 4 +- trex_rpc_server_spec.asciidoc | 39 ++++++++ trex_stateless.asciidoc | 217 +----------------------------------------- 3 files changed, 45 insertions(+), 215 deletions(-) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 793f3372..15a9560f 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -203,7 +203,7 @@ This issue would be fixed when we consolidate ``Stateful'' and ``Stateless'' RPC TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC address. From Stateless mode, you can change the port mode to promiscuous. + Also, revisit your MAC address configuration in the TRex config file. Wrong MAC address configuration will cause all packets to be dropped. -==== Why is the performance low? +==== Why the performance is low? TRex performance depends on many factors: @@ -304,7 +304,7 @@ Yes. Multiple TRex clients can connect to the same TRex server. Yes. You can build any packet you like using Scapy. However, there is no way to create corrupted L1 fields (Like Ethernet FCS), since these are usually handled by the NIC hardware. -==== Why is the performance low? +==== Why the performance is low? What would reduce the performance: 1. Many concurent streams. diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index 15bfc97f..e7c669ca 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -692,6 +692,45 @@ Any element in the array can be one of the following object types: | step | uint64_t as string | step, how much to inc or dec. 
1 is the default (in case of 'random' this field is not used) |================= + +===== repetable_random + +Instruction to choose a limited number of random values from a big range +The values could be deterministic by providing seed + +.Object type 'vm - flow_var' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''flow_var_rand_limit''' +| name | string | flow var name - this should be a unique identifier +| size | [1,2,4,8] | size of the var in bytes +| limit | uint64_t as string | the number of values to choose +| seed | uint64_t as string | seed of the random, in case there is no seed time will be taken +| min_value | uint64_t as string | minimum value for the field +| max_value | uint64_t as string | maximum value for the field +|================= + + +an example of tuple_flow_var variable + +[source,bash] +---- + size = 2 + limit = 5 + seed = 0x1234 + min_value = 0 + max_value = 10 +---- + +results could be + +[source,bash] +---- +7 , 8, 1 ,5, 2 , 7 , 8, 1 ,5, 2, 7 , 8, 1 ,5, 2 +---- + + ===== write_flow_var .Object type 'vm - write_flow_var' diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 4296b430..0b6ad8f7 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -2032,57 +2032,10 @@ This principal can be done for IPv6 too. ARP could be replaced with Neighbor Sol ==== Tutorial: Field Engine, split to core -The following example splits generated traffic into a number of threads. You can specify the field to use for determining how to split the traffic into threads. Without this feature, traffic is duplicated and all threads transmit the same traffic. (See the results tables in the examples below in this tutorial.) +Post v2.08 version split to core directive was deprecated and was kept for backward compatibility. +The new implementation is always to split as if the profile was sent from one core. +The user of TRex is oblivious to the number of cores. 
-*Without Split*:: - -Scenario: 2 transmitters, DP threads - -[source,python] ----- - def create_stream (self): - - # TCP SYN - base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") - - - # vm - vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", - min_value="16.0.0.0", - max_value="16.0.0.254", - size=4, op="inc"), <1> - - - STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), <2> - - STLVmFixIpv4(offset = "IP"), # fix checksum - ] - - ) - ----- -<1> Define stream variable ip_src -<2> Write stream variable we defined to `IPv4.src`. - - -.Variable per thread -[format="csv",cols="1^,3^,3^", options="header",width="40%"] -|================= -pkt, thread-0 ip_src,thread-1 ip_src - 1 , 16.0.0.1 , 16.0.0.1 - 2 , 16.0.0.2 , 16.0.0.2 - 3 , 16.0.0.3 , 16.0.0.3 - 4 , 16.0.0.4 , 16.0.0.4 - 5 , 16.0.0.5 , 16.0.0.5 - 6 , 16.0.0.6, 16.0.0.6 -|================= - -* In the case shown above, all threads transmit the same packets. - - -*With Split feature enabled*:: - -Scenario: 2 transmitters, DP threads [source,python] ---- @@ -2107,41 +2060,8 @@ Scenario: 2 transmitters, DP threads ) ---- -<1> Same example as previous, but split by the `ip_src` stream variable. +<1> Deprecated split by field. not used any more (post v2.08) -.Variable per thread -[format="csv",cols="1^,3^,3^", options="header",width="40%"] -|================= -pkt, thread-0 ip_src ,thread-1 ip_src - 1 , 16.0.0.1 , 16.0.0.128 - 2 , 16.0.0.2 , 16.0.0.129 - 3 , 16.0.0.3 , 16.0.0.130 - 4 , 16.0.0.4 , 16.0.0.131 - 5 , 16.0.0.5 , 16.0.0.132 - 6 , 16.0.0.6, 16.0.0.133 -|================= - -* In this case the stream variable is split. - -To simulate this, using the `stl/udp_1pkt_range_clients_split.py` traffic profile, you can run the following command: - -[source,bash] ----- -$./stl-sim -f stl/udp_1pkt_range_clients_split.py -o a.pcap -c 2 -l 10 #<1> ----- -<1> Simulates 2 threads as specified by the `-c 2` option. 
- -.Variable per thread -[format="csv",cols="1^,3^,3^", options="header",width="40%"] -|================= -pkt, thread-0 ip_src,thread-1 ip_src - 1 , 55.55.0.1 , 55.55.58.153 - 2 , 55.55.0.2 , 55.55.58.154 - 3 , 55.55.0.3 , 55.55.58.155 - 4 , 55.55.0.4 , 55.55.58.156 - 5 , 55.55.0.5 , 55.55.58.157 - 6 , 55.55.0.6 , 55.55.58.158 -|================= *Some rules regarding split stream variables and burst/multi-burst*:: @@ -2149,135 +2069,6 @@ pkt, thread-0 ip_src,thread-1 ip_src * When the number of packets in a burst is smaller than the number of threads, one thread handles the burst. * In the case of a stream with a burst of *1* packet, only the first DP thread handles the stream. -==== Tutorial: Field Engine, Split to core with burst - -The following example splits generated traffic into a number of threads when using a stream configured to Burst. In contrast to the previous tutorial, this example uses the Burst pattern. As with the previous tutorial, the number of packets is split into multiple threads. In the example in this tutorial, the Field Engine is split also. - -*Without split feature enabled*:: - -In this example: - -* Number of threads: 2 -* Split: Not configured - -[source,python] ----- -# no split -class STLS1(object): - """ attack 48.0.0.1 at port 80 - """ - - def __init__ (self): - self.max_pkt_size_l3 =9*1024 - - def create_stream (self): - - base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") - - vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", <1> - min_value="16.0.0.0", - max_value="18.0.0.254", - size=4, op="inc"), - - STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), <2> - - STLVmFixIpv4(offset = "IP"), # fix checksum - ] - ) - - pkt = STLPktBuilder(pkt = base_pkt, - vm = vm) - - return STLStream(packet = pkt, - mode = STLTXSingleBurst(total_pkts = 20)) <3> - ----- -<1> Stream variable. -<2> Writes it to `IPv4.src`. -<3> Burst of 20 packets. 
- -.Variable per thread -[format="csv",cols="1^,3^,3^", options="header",width="40%"] -|================= -pkt, thread-0 ip_src,thread-1 ip_src - 1 , 16.0.0.1 , 16.0.0.1 - 2 , 16.0.0.2 , 16.0.0.2 - 3 , 16.0.0.3 , 16.0.0.3 - 4 , 16.0.0.4 , 16.0.0.4 - 5 , 16.0.0.5 , 16.0.0.5 - 6 , 16.0.0.6, 16.0.0.6 - 7 , 16.0.0.7, 16.0.0.7 - 8 , 16.0.0.8, 16.0.0.8 - 9 , 16.0.0.9, 16.0.0.9 - 10 , 16.0.0.10, 16.0.0.10 -|================= - -*Results*:: - -* Total packets are 20 as expected, 10 generated by each thread. -* Field engine is the same for both threads. - - -*With split feature enabled*:: - -[source,python] ----- -class STLS1(object): - """ attack 48.0.0.1 at port 80 - """ - - def __init__ (self): - self.max_pkt_size_l3 =9*1024 - - def create_stream (self): - - base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S") - - vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src", - min_value="16.0.0.0", - max_value="18.0.0.254", - size=4, op="inc"), - - STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ), - - STLVmFixIpv4(offset = "IP"), # fix checksum - ] - ,split_by_field = "ip_src" <1> - - ) - - pkt = STLPktBuilder(pkt = base_pkt, - vm = vm) - - return STLStream(packet = pkt, - mode = STLTXSingleBurst(total_pkts = 20)) <2> - ----- -<1> Split is added by the `ip_src` stream variable. -<2> Burst of 20 packets. - - -.Variable per thread -[format="csv",cols="1^,3^,3^", options="header",width="40%"] -|================= -pkt, thread-0 ip_src,thread-1 ip_src - 1 , 16.0.0.1 , 17.0.0.128 - 2 , 16.0.0.2 , 17.0.0.129 - 3 , 16.0.0.3 , 17.0.0.130 - 4 , 16.0.0.4 , 17.0.0.131 - 5 , 16.0.0.5 , 17.0.0.132 - 6 , 16.0.0.6, 17.0.0.133 - 7 , 16.0.0.7, 17.0.0.134 - 8 , 16.0.0.8, 17.0.0.135 - 9 , 16.0.0.9, 17.0.0.136 - 10 , 16.0.0.10, 17.0.0.137 -|================= - -*Results*:: - -* Total packets are 20 as expected, 10 generated by each thread. -* Field engine is *not* the same for both threads. - ==== Tutorial: Field Engine, Null stream The following example creates a stream with no packets. 
The example uses the inter-stream gap (ISG) of the Null stream, and then starts a new stream. Essentially, this uses one property of the stream (ISG) without actually including packets in the stream. -- cgit 1.2.3-korg From 067f6e978cc796fc91195123123b392f9042831a Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Mon, 26 Sep 2016 16:19:00 +0300 Subject: v2.09 pre --- release_notes.asciidoc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 7f0d64d6..7581459e 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,6 +23,20 @@ ifdef::backend-docbook[] endif::backend-docbook[] +== Release 2.09 pre == + +* Statless, split to core algorithm is more accurate and simple see link:trex_stateless.html#_tutorial_field_engine_split_to_core[split_to_core] +* Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] +* Support dual mode for push pcap/remote Python API. 
see here link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_remote[push_remote] and link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_pcap[push_pcap] Using this feature pcap can be sent to client/server ports +* Add infra for L2 emulation support + +=== fix issues: === + +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-243[trex-243] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-247[trex-247] +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-244[trex-244] + + == Release 2.08 == * Scapy JSON-RPC server for GUI packet crafting, see link:trex_scapy_rpc_server.html[trex_scapy_rpc_server] -- cgit 1.2.3-korg From c50cad952cb1d81925aa41f374966c3ddd8354cd Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 29 Sep 2016 18:09:40 +0300 Subject: add hw_cs --- release_notes.asciidoc | 4 +++- trex_rpc_server_spec.asciidoc | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 7581459e..051e3797 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -27,7 +27,9 @@ endif::backend-docbook[] * Statless, split to core algorithm is more accurate and simple see link:trex_stateless.html#_tutorial_field_engine_split_to_core[split_to_core] * Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] -* Support dual mode for push pcap/remote Python API. 
see here link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_remote[push_remote] and link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_pcap[push_pcap] Using this feature pcap can be sent to client/server ports +* Add TCP/UDP checksum fix instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/syn_attack_fix_cs_hw.py[stl/syn_attack_fix_cs_hw.py] link:cp_stl_docs/api/field_engine.html#stlvmfixchecksumhw[fix checksum] and link:trex_rpc_server_spec.html#_fix_checksum_hw[fix_checksum_hw_spec] +* Improve Stateless Field Engine (FE) performance +* Support dual mode for push pcap/remote Python API. see here link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_remote[push_remote] and link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_pcap[push_pcap] Using this feature pcap can be splited to client/server ports * Add infra for L2 emulation support === fix issues: === diff --git a/trex_rpc_server_spec.asciidoc b/trex_rpc_server_spec.asciidoc index e7c669ca..0e49783f 100755 --- a/trex_rpc_server_spec.asciidoc +++ b/trex_rpc_server_spec.asciidoc @@ -666,6 +666,20 @@ an Object that include instructions array and properties of the field engine pro Array of VM instruction objects to be used with this stream Any element in the array can be one of the following object types: +===== fix_checksum_hw + +Fix TCP/UDP and IPv4 headers using hardware assit engine + +.Object type 'vm - fix_checksum_hw' +[options="header",cols="1,1,3"] +|================= +| Field | Type | Description +| type | string | ''fix_checksum_hw'' +| l2_len | uint16 | len of L2 (e.g. 14 Ether) +| l3_len | uint16 | len of l3 header (e.g. 
20 for IP) +| l4_type | uint16 | the type of L4 header either UDP or TCP ( L4_TYPE_UDP = 11 | L4_TYPE_TCP = 13) +|================= + ===== fix_checksum_ipv4 .Object type 'vm - fix_checksum_ipv4' -- cgit 1.2.3-korg From a18842ff5c4e496d39837eb284f41f7132ebf67f Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 30 Sep 2016 15:51:15 +0300 Subject: v2.09 --- release_notes.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 051e3797..1eebfe23 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,7 +23,7 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 2.09 pre == +== Release 2.09 == * Statless, split to core algorithm is more accurate and simple see link:trex_stateless.html#_tutorial_field_engine_split_to_core[split_to_core] * Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] @@ -37,7 +37,7 @@ endif::backend-docbook[] * link:http://trex-tgn.cisco.com/youtrack/issue/trex-243[trex-243] * link:http://trex-tgn.cisco.com/youtrack/issue/trex-247[trex-247] * link:http://trex-tgn.cisco.com/youtrack/issue/trex-244[trex-244] - +* link:http://trex-tgn.cisco.com/youtrack/issue/trex-249[trex-249] == Release 2.08 == -- cgit 1.2.3-korg From e976bcfed2762d1b2c9345b7f32f3c1c34b6fd17 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Fri, 30 Sep 2016 15:59:56 +0300 Subject: amend github links --- release_notes.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 1eebfe23..23c8002a 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -25,9 +25,10 @@ endif::backend-docbook[] == 
Release 2.09 == + * Statless, split to core algorithm is more accurate and simple see link:trex_stateless.html#_tutorial_field_engine_split_to_core[split_to_core] -* Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] -* Add TCP/UDP checksum fix instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/api/stl/syn_attack_fix_cs_hw.py[stl/syn_attack_fix_cs_hw.py] link:cp_stl_docs/api/field_engine.html#stlvmfixchecksumhw[fix checksum] and link:trex_rpc_server_spec.html#_fix_checksum_hw[fix_checksum_hw_spec] +* Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] +* Add TCP/UDP checksum fix instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl/syn_attack_fix_cs_hw.py[stl/syn_attack_fix_cs_hw.py] link:cp_stl_docs/api/field_engine.html#stlvmfixchecksumhw[fix checksum] and link:trex_rpc_server_spec.html#_fix_checksum_hw[fix_checksum_hw_spec] * Improve Stateless Field Engine (FE) performance * Support dual mode for push pcap/remote Python API. 
see here link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_remote[push_remote] and link:cp_stl_docs/api/client_code.html#trex_stl_lib.trex_stl_client.STLClient.push_pcap[push_pcap] Using this feature pcap can be splited to client/server ports * Add infra for L2 emulation support -- cgit 1.2.3-korg From ae8f7eca98021d961e083d47ab0866ec5b5c592f Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Sun, 9 Oct 2016 16:41:23 +0300 Subject: FAQ corrections --- trex_faq.asciidoc | 88 +++++++++++++++++++------------------------------------ 1 file changed, 30 insertions(+), 58 deletions(-) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 15a9560f..ac309680 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -140,32 +140,34 @@ You have several options. + 2. To run the real product, check link:trex_manual.html#_download_and_installation[here] for hardware recommendation and installation instructions. -==== During OS installation, screen is skewed / error "out of range" / resolution not supported etc +==== During OS installation, screen is skewed / error "out of range" / resolution not supported etc. - * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode - * Ubuntu - try Ubuntu server, which has textual installation + * Fedora - during installation, choose "Troubleshooting" -> Install in basic graphic mode. + * Ubuntu - try Ubuntu server, which has textual installation. -==== How to determine relation between TRex ports and device under test ports +==== How to determine relation between TRex ports and device under test ports? -Run the TRex with following command and check incoming packet on router interfaces: +Run TRex with the below command and check incoming packet count on DUT interfaces. 
[source,bash] ---- sudo ./t-rex-64 -f cap2/dns.yaml --lm 1 --lo -l 1000 -d 100 ---- -==== How to determine relation between Virtual OS ports and Hypervisor ports +Alternatively, you can run TRex in stateless mode, send traffic from each port, and look at the counters on the DUT interfaces. + +==== How to determine relation between Virtual OS ports and Hypervisor ports? Compare the MACs address + name of interface, for example: [source,bash] ---- -* > ifconfig + -*eth0* Link encap:Ethernet *HWaddr 00:0c:29:2a:99:b2* + +> ifconfig +eth0 Link encap:Ethernet HWaddr 00:0c:29:2a:99:b2 ... -* > sudo ./dpdk_setup_ports.py -s + -*03:00.0* 'VMXNET3 Ethernet Controller' *if=eth0* drv=vmxnet3 unused=igb_uio +> sudo ./dpdk_setup_ports.py -s +03:00.0 'VMXNET3 Ethernet Controller' if=eth0 drv=vmxnet3 unused=igb_uio ---- [NOTE] @@ -184,20 +186,19 @@ We are planning to add MACs to `./dpdk_setup_ports.py -s` ==== TRex traffic does not show up on Wireshark, so I can not capture the traffic from the TRex port TRex uses DPDK which takes ownership of the ports, so using Wireshark is not possible. You can use switch with port mirroring to capture the traffic. -==== How can I map betwean TRex port-id (e.g. port 0) and physical router interface? -Load TRex in stateless mode, run traffic from each port, and look at the counters on the router interfaces. - - === Stateful ==== How do I start using the stateful mode? You should first have a YAML configuration file. See link:trex_manual.html#_traffic_yaml_parameter_of_f_option[here]. Then, you can find some basic examples link:trex_manual.html#_trex_command_line[here]. -==== TRex is connected to a switch and we observe many dropped packets at TRex startup. -A switch might be configured with spanning tree enabled. TRex initializes the port at startup, making the spanning tree drop the packets. +==== TRex is connected to a switch and I observe many dropped packets at TRex startup. +A switch might be configured with spanning tree enabled. 
TRex reset the port at startup, making the switch reset it side as well, +and spanning tree can drop the packets until it stabilizes. Disabling spanning tree can help. On Cisco nexus, you can do that using `spanning-tree port type edge` -This issue would be fixed when we consolidate ``Stateful'' and ``Stateless'' RPC. +You can also start Cisco with -k flag. This will send packets for k seconds before starting the actual test, letting the spanning +tree time to stabilize. +This issue will be fixed when we consolidate ``Stateful'' and ``Stateless'' RPC. ==== I can not see RX packets TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC address. From Stateless mode, you can change the port mode to promiscuous. + @@ -218,52 +219,23 @@ Yes. We know this is something many people would like, and are working on this. You can use the simulator. see link:trex_manual.html#_simulator[simulator] The output of the simulator can be loaded to Excel. The CPS can be tuned. -==== I want to have more active flows on the DUT, how can I do it? -After stretching TRex to its maximum CPS capacity, consider the following: DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows). -In order to artificialy increse the length of the active flows in TRex, you can config larger IPG in the YAML file. This will cause each flow to last longer. Alternatively, you can increase IPG in your PCAP file as well. - - -==== How do I support more active flows? -The default maximum supported flows are 1M total (TRex prospective). DUT could have much more due to aging. When active flows are more than 1M flows there is message that there is no enough memory. +==== I want to have more acrive flows in TRex, how can I do this? +Default maximum supported flows is 1M (From TRex prespective. DUT might have much more due to slower aging). 
When active flows reach higher number, you will get ``out of memory'' error message -[source,Python] --------- -Active-flows : 1045562 Clients : 80120 Socket-util : 0.0207 % --------- - -Look link:trex_manual.html#_memory_section_configuration[here] - -This example support 10M flows - -[source,Python] --------- -- port_limit : 2 - version : 2 - interfaces : ['04:00.0', '0c:00.0'] # list of the interfaces to bind run ./dpdk_nic_bind.py --status - port_info : # set eh mac addr - - - dest_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x84] - src_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x83] - - - dest_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x83] - src_mac : [0x18, 0x8b, 0x9d, 0xa3, 0xae, 0x84] - - memory : - dp_flows : 10048576 <1> --------- -<1> 10M flows +To increase the number of supported active flows, you should add ``dp_flows'' arg in config file ``memory'' section. +Look link:trex_manual.html#_memory_section_configuration[here] for more info. +==== I want to have more active flows on the DUT, how can I do this? +After stretching TRex to its maximum CPS capacity, consider the following: DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows). +In order to artificialy increse the length of the active flows in TRex, you can config larger IPG in the YAML file. This will cause each flow to last longer. Alternatively, you can increase IPG in your PCAP file as well. -==== I am getting and error: The number of ips should be at least number of threads +==== I am getting an error: The number of ips should be at least number of threads. The range of clients and servers should be at least the number of threads. The number of threads is equal to (number of port pairs) * (-c value) -==== Incoming frames are of type SCTP. Why? -Default latency packets are SCTP, you can remove `-l 1000` or change it to ICMP see manual for more info. +==== Some of the incoming frames are of type SCTP. Why? 
+Default latency packets are SCTP, you can omit the `-l ` from command line, or change it to ICMP. See the manual for more info. -==== Is there a configuration guide to Linux as a router (static ARP)? -have a look link:https://groups.google.com/forum/#!topic/trex-tgn/YQcQtA8A8hA[linux as a router] - === Stateless ==== How do I get started with stateless mode? @@ -305,7 +277,7 @@ Yes. You can build any packet you like using Scapy. However, there is no way to create corrupted L1 fields (Like Ethernet FCS), since these are usually handled by the NIC hardware. ==== Why the performance is low? -What would reduce the performance: +Major things that can reduce the performance are: 1. Many concurent streams. 2. Complex field engine program. @@ -418,7 +390,7 @@ To workaround this you could create one stream in lower speed for latency (e.g. flow_stats = STLFlowLatencyStats(pg_id = 12+port_id)) -------- <1> non latency stream will be amplified -<2> latency stream, the speed would be constant 1KPPS +<2> latency stream, the speed will be constant 1KPPS ===== Latency stream has constant rate of 1PPS, and is not getting amplified by multiplier. Why? 
-- cgit 1.2.3-korg From 46531ad0637c3585a9b55569762ef8a84d9fc5b0 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 13 Oct 2016 11:30:38 +0300 Subject: update illustrations for dpdk summit Signed-off-by: Hanoh Haim --- visio_drawings/illustrations_stateless.vsd | Bin 5082624 -> 5346816 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/visio_drawings/illustrations_stateless.vsd b/visio_drawings/illustrations_stateless.vsd index aa827a21..9a979a7f 100755 Binary files a/visio_drawings/illustrations_stateless.vsd and b/visio_drawings/illustrations_stateless.vsd differ -- cgit 1.2.3-korg From 2441faec09013a9b222a9b1c5d3e4f858e15817b Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Tue, 11 Oct 2016 06:28:37 +0300 Subject: stl 40G bench --- images/128_nodrop.png | Bin 0 -> 40822 bytes images/128_util.png | Bin 0 -> 40877 bytes images/1514_nodrop.png | Bin 0 -> 44913 bytes images/1514_util.png | Bin 0 -> 40567 bytes images/590_nodrop.png | Bin 0 -> 41542 bytes images/590_util.png | Bin 0 -> 40506 bytes images/64_nodrop.png | Bin 0 -> 40877 bytes images/64_util.png | Bin 0 -> 41139 bytes images/icons/callouts/1.png | Bin 329 -> 183 bytes images/icons/callouts/10.png | Bin 361 -> 229 bytes images/icons/callouts/11.png | Bin 565 -> 223 bytes images/icons/callouts/12.png | Bin 617 -> 237 bytes images/icons/callouts/13.png | Bin 623 -> 238 bytes images/icons/callouts/14.png | Bin 411 -> 245 bytes images/icons/callouts/15.png | Bin 640 -> 251 bytes images/icons/callouts/2.png | Bin 353 -> 209 bytes images/icons/callouts/3.png | Bin 350 -> 211 bytes images/icons/callouts/4.png | Bin 345 -> 204 bytes images/icons/callouts/5.png | Bin 348 -> 206 bytes images/icons/callouts/6.png | Bin 355 -> 219 bytes images/icons/callouts/7.png | Bin 344 -> 199 bytes images/icons/callouts/8.png | Bin 357 -> 219 bytes images/icons/callouts/9.png | Bin 357 -> 228 bytes trex_stateless.asciidoc | 6 +- trex_stateless_bench.asciidoc | 149 
++++++++++++++++++++++++++++++++++++++++++ ws_main.py | 3 + 26 files changed, 157 insertions(+), 1 deletion(-) create mode 100755 images/128_nodrop.png create mode 100755 images/128_util.png create mode 100755 images/1514_nodrop.png create mode 100755 images/1514_util.png create mode 100755 images/590_nodrop.png create mode 100755 images/590_util.png create mode 100755 images/64_nodrop.png create mode 100755 images/64_util.png create mode 100755 trex_stateless_bench.asciidoc diff --git a/images/128_nodrop.png b/images/128_nodrop.png new file mode 100755 index 00000000..e2a4c59a Binary files /dev/null and b/images/128_nodrop.png differ diff --git a/images/128_util.png b/images/128_util.png new file mode 100755 index 00000000..7dd7627e Binary files /dev/null and b/images/128_util.png differ diff --git a/images/1514_nodrop.png b/images/1514_nodrop.png new file mode 100755 index 00000000..34902725 Binary files /dev/null and b/images/1514_nodrop.png differ diff --git a/images/1514_util.png b/images/1514_util.png new file mode 100755 index 00000000..7ad7d4da Binary files /dev/null and b/images/1514_util.png differ diff --git a/images/590_nodrop.png b/images/590_nodrop.png new file mode 100755 index 00000000..569bb9e7 Binary files /dev/null and b/images/590_nodrop.png differ diff --git a/images/590_util.png b/images/590_util.png new file mode 100755 index 00000000..892caf0b Binary files /dev/null and b/images/590_util.png differ diff --git a/images/64_nodrop.png b/images/64_nodrop.png new file mode 100755 index 00000000..7dd7627e Binary files /dev/null and b/images/64_nodrop.png differ diff --git a/images/64_util.png b/images/64_util.png new file mode 100755 index 00000000..d3b6807e Binary files /dev/null and b/images/64_util.png differ diff --git a/images/icons/callouts/1.png b/images/icons/callouts/1.png index 7d473430..054ea07a 100755 Binary files a/images/icons/callouts/1.png and b/images/icons/callouts/1.png differ diff --git a/images/icons/callouts/10.png 
b/images/icons/callouts/10.png index 997bbc82..8833bd59 100755 Binary files a/images/icons/callouts/10.png and b/images/icons/callouts/10.png differ diff --git a/images/icons/callouts/11.png b/images/icons/callouts/11.png index ce47dac3..d77914d8 100755 Binary files a/images/icons/callouts/11.png and b/images/icons/callouts/11.png differ diff --git a/images/icons/callouts/12.png b/images/icons/callouts/12.png index 31daf4e2..ac9f8af7 100755 Binary files a/images/icons/callouts/12.png and b/images/icons/callouts/12.png differ diff --git a/images/icons/callouts/13.png b/images/icons/callouts/13.png index 14021a89..e5e62a1f 100755 Binary files a/images/icons/callouts/13.png and b/images/icons/callouts/13.png differ diff --git a/images/icons/callouts/14.png b/images/icons/callouts/14.png index 64014b75..f55ef966 100755 Binary files a/images/icons/callouts/14.png and b/images/icons/callouts/14.png differ diff --git a/images/icons/callouts/15.png b/images/icons/callouts/15.png index 0d65765f..fee9beda 100755 Binary files a/images/icons/callouts/15.png and b/images/icons/callouts/15.png differ diff --git a/images/icons/callouts/2.png b/images/icons/callouts/2.png index 5d09341b..b05ad9c7 100755 Binary files a/images/icons/callouts/2.png and b/images/icons/callouts/2.png differ diff --git a/images/icons/callouts/3.png b/images/icons/callouts/3.png index ef7b7004..9ce22b7d 100755 Binary files a/images/icons/callouts/3.png and b/images/icons/callouts/3.png differ diff --git a/images/icons/callouts/4.png b/images/icons/callouts/4.png index adb8364e..03d55928 100755 Binary files a/images/icons/callouts/4.png and b/images/icons/callouts/4.png differ diff --git a/images/icons/callouts/5.png b/images/icons/callouts/5.png index 4d7eb460..710b57cc 100755 Binary files a/images/icons/callouts/5.png and b/images/icons/callouts/5.png differ diff --git a/images/icons/callouts/6.png b/images/icons/callouts/6.png index 0ba694af..65ce9b91 100755 Binary files a/images/icons/callouts/6.png 
and b/images/icons/callouts/6.png differ diff --git a/images/icons/callouts/7.png b/images/icons/callouts/7.png index 472e96f8..07bc7f1c 100755 Binary files a/images/icons/callouts/7.png and b/images/icons/callouts/7.png differ diff --git a/images/icons/callouts/8.png b/images/icons/callouts/8.png index 5e60973c..fc640cab 100755 Binary files a/images/icons/callouts/8.png and b/images/icons/callouts/8.png differ diff --git a/images/icons/callouts/9.png b/images/icons/callouts/9.png index a0676d26..5bbc0ad9 100755 Binary files a/images/icons/callouts/9.png and b/images/icons/callouts/9.png differ diff --git a/trex_stateless.asciidoc b/trex_stateless.asciidoc index 0b6ad8f7..f3ae85b3 100755 --- a/trex_stateless.asciidoc +++ b/trex_stateless.asciidoc @@ -5,7 +5,7 @@ TRex Stateless support :revnumber: 2.01 :quotes.++: :numbered: -:web_server_url: http://trex-tgn.cisco.com/trex +:web_server_url: https://trex-tgn.cisco.com/trex :local_web_server_url: csi-wiki-01:8181/trex :github_stl_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl :github_stl_examples_path: https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/automation/trex_control_plane/stl/examples @@ -4054,6 +4054,10 @@ The followig keyboard commands operate in the TUI window: + c - Clear all counters + d, s, l - change display between dashboard (d), streams (s) and l (latency) info. 
+ +=== Benchmarks of 40G NICs + +link:trex_stateless_bench.html[TRex stateless benchmarks] + === Appendix ==== Scapy packet examples diff --git a/trex_stateless_bench.asciidoc b/trex_stateless_bench.asciidoc new file mode 100755 index 00000000..0ddcb35d --- /dev/null +++ b/trex_stateless_bench.asciidoc @@ -0,0 +1,149 @@ +TRex Stateless support +====================== +:email: trex.tgen@gmail.com +:quotes.++: +:numbered: +:web_server_url: https://trex-tgn.cisco.com/trex +:local_web_server_url: csi-wiki-01:8181/trex +:toclevels: 6 +:tabledef-default.subs: normal,callouts + +include::trex_ga.asciidoc[] + +// PDF version - image width variable +ifdef::backend-docbook[] +:p_width: 450 +endif::backend-docbook[] + +// HTML version - image width variable +ifdef::backend-xhtml11[] +:p_width: 800 +endif::backend-xhtml11[] + + +== TRex stateless L2 benchmarks using XL710 40G NICs + +=== Setup details + +[cols="1,5"] +|================= +| Server: | UCSC-C240-M4SX +| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz +| RAM: | 65536 @ 2133 MHz +| NICs: | 2 x Intel Corporation Ethernet Controller XL710 for 40GbE QSFP+ (rev 01) +| QSFP: | Cisco QSFP-H40G-AOC1M +| OS: | Fedora 18 +| Switch: | Cisco Nexus 3172 Chassis, System version: 6.0(2)U5(2). +| TRex: | v2.09 using 7 cores per dual interface. 
+|================= + +=== Topology + +TRex port 1 ↔ Switch port Eth1/50 (vlan 1005) ↔ Switch port Eth1/52 (vlan 1005) ↔ TRex port 2 + +=== Results + +.Cached VM +[cols="2,2^,2^,2^,2^,2^,2^,2^,3", options="header"] +|================= +| Packet size | Line Utilization (%) | Total L1 (Gb/s) | Total L2 (Gb/s) | CPU Util (%) | Total MPPS | BW per core (Gb/s) <1> | MPPS per core <2> | Multiplier +| Imix | 100.04 | 80.03 | 76.03 | 2.7 | 25.03 | 89.74 | 28.07 | 100% +| 1514 | 100.12 | 80.1 | 79.05 | 1.33 | 6.53 | 430.18 | 35.07 | 100% +| 590 | 99.36 | 79.49 | 76.89 | 3.2 | 16.29 | 177.43 | 36.36 | 99.5% +| 128 | 99.56 | 79.65 | 68.89 | 15.4 | 67.27 | 36.94 | 31.2 | 99.5% +| 64 | 52.8 | 42.3 | 32.23 | 14.1 | 62.95 | 21.43 | 31.89 | 31.5mpps +|================= + +.VM with 1 variable +[cols="2,2^,2^,2^,2^,2^,2^,2^,3", options="header"] +|================= +| Packet size | Line Utilization (%) | Total L1 (Gb/s) | Total L2 (Gb/s) | CPU Util (%) | Total MPPS | BW per core (Gb/s) <1> | MPPS per core <2> | Multiplier +| Imix | 100.04 | 80.03 | 76.03 | 12.6 | 25.03 | 45.37 | 14.19 | 100% +| 1514 | 100.12 | 80.1 | 79.05 | 2.6 | 6.53 | 220.05 | 17.94 | 100% +| 590 | 99.36 | 79.49 | 76.89 | 5.6 | 16.29 | 101.39 | 20.78 | 99.5% +| 128 | 99.56 | 79.65 | 68.89 | 33.1 | 67.27 | 17.19 | 14.52 | 99.5% +| 64 | 52.8 | 42.3 | 32.23 | 31.3 | 63.06 | 9.65 | 14.37 | 31.5mpps +|================= + +<1> Extrapolated L1 bandwidth per 1 core @ 100% CPU utilization. +<2> Extrapolated amount of MPPS per 1 core @ 100% CPU utilization. + +== Appendix + +=== Preparing setup and running the tests. + +==== Hardware preparations + +Order the UCS with HW described above. + +* There are several NICs with this chipset. + +Bare Intel NICs don't work with Cisco QSFP+ optics, for such case you will need Silicom NICs. +* Use NICs with 2x40G ports in each. +* Put the NICs at different NUMAs (first on the left side, second on the right side). 
+ +==== Software preparations + +* Install the OS (Bare metal Linux, *not* VM!) +* Obtain the latest TRex package: wget https://trex-tgn.cisco.com/trex/release/latest +* Untar the package: tar -xzf latest +* Change dir to unzipped TRex +* Create config file using command: sudo python dpdk_setup_ports.py -i +** In case of Ubuntu 16 need python3 +** The script by default produces config for loopback or L2 Switch as DUT (ports connection 1↔2, 3↔4 etc.). + +If you have router or L3 switch, change the destination MACs of TRex interfaces to match the DUT one's. + +==== The tests + +* Run the TRex server: sudo ./t-rex-64 -i -c 7 +* In another shell run TRex console: trex-console +** The console can be run from another computer with -s argument, --help for more info. +** Other options for TRex client are automation or GUI +* In the console, run "tui" command, and then send the traffic with commands like: +** start -f stl/bench.py -m 50% --port 0 3 -t size=590,vm=var1 +** stop +** clear +** start -f stl/bench.py -m 30mpps --port 0 -t size=64,vm=cached +** start -f stl/bench.py -m 100% -t size=1514,vm=random --force + +=== Some of screenshots of console with commands + +==== 64 bytes + +Utilization: + +image:images/64_util.png[title="64 bytes util",align="left",width={p_width}, link="images/64_util.png"] + +No drops: + +image:images/64_nodrop.png[title="64 bytes no drops",align="left",width={p_width}, link="images/64_nodrop.png"] + +==== 128 bytes + +Utilization: + +image:images/128_util.png[title="128 bytes util",align="left",width={p_width}, link="images/128_util.png"] + +No drops: + +image:images/128_nodrop.png[title="128 bytes no drops",align="left",width={p_width}, link="images/128_nodrop.png"] + +==== 590 bytes + +Utilization: + +image:images/590_util.png[title="128 bytes util",align="left",width={p_width}, link="images/590_util.png"] + +No drops: + +image:images/590_nodrop.png[title="590 bytes no drops",align="left",width={p_width}, link="images/590_nodrop.png"] + 
+==== 1514 bytes + +Utilization: + +image:images/1514_util.png[title="128 bytes util",align="left",width={p_width}, link="images/1514_util.png"] + +No drops: + +image:images/1514_nodrop.png[title="1514 bytes no drops",align="left",width={p_width}, link="images/1514_nodrop.png"] + diff --git a/ws_main.py b/ws_main.py index 8b2f99b6..762bb7b2 100755 --- a/ws_main.py +++ b/ws_main.py @@ -908,6 +908,9 @@ def build(bld): bld(rule=convert_to_html_toc_book, source='trex_stateless.asciidoc waf.css', target='trex_stateless.html',scan=ascii_doc_scan); + bld(rule=convert_to_html_toc_book, + source='trex_stateless_bench.asciidoc waf.css', target='trex_stateless_bench.html',scan=ascii_doc_scan); + bld(rule=convert_to_html_toc_book, source='trex_book.asciidoc waf.css', target='trex_manual.html',scan=ascii_doc_scan); -- cgit 1.2.3-korg From ecd1a97965af52b88ab2fa558ee23466ccbd92b8 Mon Sep 17 00:00:00 2001 From: Hanoh Haim Date: Thu, 13 Oct 2016 19:48:35 +0300 Subject: merge XL710 benchmark Signed-off-by: Hanoh Haim --- trex_faq.asciidoc | 1 + trex_index.asciidoc | 2 ++ trex_stateless_bench.asciidoc | 8 +++++--- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index ac309680..169b04be 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -118,6 +118,7 @@ This is still better than the Intel x520 (82559 based) which can reach ~30MPPS f ==== I have XL710 NIC with 2x40Gb/sec ports and I can not get line rate XL710-da2 with 2 40G ports can reach maximum of 40MPPS/50Gb (total for all ports) and not 60MPPS with small packets (64B) Intel had in mind redundancy use case when they produced a two port NIC. Card was not intended to reach 80G line rate. 
+see link:trex_stateless_bench.html[xl710_benchmark.html] for more info ==== I want to contribute to the project You have several ways you can help: + diff --git a/trex_index.asciidoc b/trex_index.asciidoc index b2f3b1fd..454e21ec 100644 --- a/trex_index.asciidoc +++ b/trex_index.asciidoc @@ -72,6 +72,8 @@ link:http://trex-tgn.cisco.com/youtrack/dashboard[youtrack] link:../release/[pkgs] | Windows Stateful GUI | link:../client_gui/[stateful GUI] +| XL710 Statless performance | +link:trex_stateless_bench.html[xl710_benchmark.html] |================= == For Developers diff --git a/trex_stateless_bench.asciidoc b/trex_stateless_bench.asciidoc index 0ddcb35d..fc55e775 100755 --- a/trex_stateless_bench.asciidoc +++ b/trex_stateless_bench.asciidoc @@ -1,5 +1,5 @@ -TRex Stateless support -====================== +XL710 Statless benchmark +======================== :email: trex.tgen@gmail.com :quotes.++: :numbered: @@ -68,6 +68,8 @@ TRex port 1 ↔ Switch port Eth1/50 (vlan 1005) ↔ Switch port Eth1/52 <1> Extrapolated L1 bandwidth per 1 core @ 100% CPU utilization. <2> Extrapolated amount of MPPS per 1 core @ 100% CPU utilization. +INFO: XL710 NIC does not support linerate for 64B by design. only 128B is supported. + == Appendix === Preparing setup and running the tests. @@ -87,7 +89,7 @@ Bare Intel NICs don't work with Cisco QSFP+ optics, for such case you will need * Obtain the latest TRex package: wget https://trex-tgn.cisco.com/trex/release/latest * Untar the package: tar -xzf latest * Change dir to unzipped TRex -* Create config file using command: sudo python dpdk_setup_ports.py -i +* Create config file using command: sudo python dpdk_setup_ports.py -i *TBD more info here* ** In case of Ubuntu 16 need python3 ** The script by default produces config for loopback or L2 Switch as DUT (ports connection 1↔2, 3↔4 etc.). + If you have router or L3 switch, change the destination MACs of TRex interfaces to match the DUT one's. 
-- cgit 1.2.3-korg From cb28556d0adca8233aebb3118b2748a3e6553e56 Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Sun, 16 Oct 2016 09:26:14 +0300 Subject: stl benchmark: step by step config creation --- trex_stateless_bench.asciidoc | 105 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 98 insertions(+), 7 deletions(-) diff --git a/trex_stateless_bench.asciidoc b/trex_stateless_bench.asciidoc index fc55e775..d4bd5367 100755 --- a/trex_stateless_bench.asciidoc +++ b/trex_stateless_bench.asciidoc @@ -1,5 +1,5 @@ -XL710 Statless benchmark -======================== +TRex Stateless support +====================== :email: trex.tgen@gmail.com :quotes.++: :numbered: @@ -68,8 +68,6 @@ TRex port 1 ↔ Switch port Eth1/50 (vlan 1005) ↔ Switch port Eth1/52 <1> Extrapolated L1 bandwidth per 1 core @ 100% CPU utilization. <2> Extrapolated amount of MPPS per 1 core @ 100% CPU utilization. -INFO: XL710 NIC does not support linerate for 64B by design. only 128B is supported. - == Appendix === Preparing setup and running the tests. @@ -89,10 +87,9 @@ Bare Intel NICs don't work with Cisco QSFP+ optics, for such case you will need * Obtain the latest TRex package: wget https://trex-tgn.cisco.com/trex/release/latest * Untar the package: tar -xzf latest * Change dir to unzipped TRex -* Create config file using command: sudo python dpdk_setup_ports.py -i *TBD more info here* +* Create config file using command: sudo python dpdk_setup_ports.py -i ** In case of Ubuntu 16 need python3 -** The script by default produces config for loopback or L2 Switch as DUT (ports connection 1↔2, 3↔4 etc.). + -If you have router or L3 switch, change the destination MACs of TRex interfaces to match the DUT one's. 
+** See paragraph link:trex_stateless_bench.html#_config_creation[config creation] for detailed step-by-step ==== The tests @@ -107,6 +104,100 @@ If you have router or L3 switch, change the destination MACs of TRex interfaces ** start -f stl/bench.py -m 30mpps --port 0 -t size=64,vm=cached ** start -f stl/bench.py -m 100% -t size=1514,vm=random --force +==== Config creation + +In our setup we will not use hyper-threading. + +We will start with command: + +sudo ./dpdk_setup_ports.py -i --no-ht + + + +Printed table with interfaces info: + +[cols="4,6,9,19,33,9,10,10", options="header"] +|================= +^| ID ^| NUMA ^| PCI ^| MAC ^| Name ^| Driver ^| Linux IF ^| Active +| 0 | 0 | 02:00.0 | 68:05:ca:32:15:b0 | Device 1583 | i40e | p1p1 | +| 1 | 0 | 02:00.1 | 68:05:ca:32:15:b1 | Device 1583 | i40e | p1p2 | +| 2 | 0 | 05:00.0 | 00:E0:ED:5D:82:D1 | Device 1583 | igb_uio | | +| 3 | 0 | 05:00.1 | 00:E0:ED:5D:82:D2 | Device 1583 | igb_uio | | +| 4 | 0 | 0a:00.0 | 04:62:73:5f:e8:a8 | I350 Gigabit Network Connection | igb | p4p1 | \*Active* +| 5 | 0 | 0a:00.1 | 04:62:73:5f:e8:a9 | I350 Gigabit Network Connection | igb | p4p2 | +| 6 | 1 | 84:00.0 | 68:05:CA:32:0C:38 | Device 1583 | igb_uio | | +| 7 | 1 | 84:00.1 | 68:05:CA:32:0C:39 | Device 1583 | igb_uio | |d +|================= + +We will be asked to specify interfaces for TRex usage: + +========================== +Please choose even number of interfaces either by ID or PCI or Linux IF (look at columns above). + +Stateful will use order of interfaces: Client1 Server1 Client2 Server2 etc. for flows. + +Stateless can be in any order. + +Try to choose each pair of interfaces to be on same NUMA within the pair for performance. + +Enter list of interfaces in line (for example: 1 3) : *2 3 6 7* +========================== + +In our setup we have used 2, 3, 6, 7. + +Next, we need to specify destination MAC addresses for given interfaces. 
+ +By default assumed loopback or L2 Switch with ports connection: 1^st^ port↔2^nd^ port, 3^rd^ port↔4^th^ port etc. + +If you have router or L3 switch or some different connection, change the destination MACs accordingly. + +In our case, ports are connected 2↔7, 3↔6. + +We will give proper MACs as destination by clicking "y" and copy-paste MAC: + +========================== +For interface 2, assuming loopback to it's dual interface 3. + +Destination MAC is 00:E0:ED:5D:82:D2. Change it to MAC of DUT? (y/N).*y* + +Please enter new destination MAC of interface 2: *68:05:CA:32:0C:39* + +For interface 3, assuming loopback to it's dual interface 2. + +Destination MAC is 00:E0:ED:5D:82:D1. Change it to MAC of DUT? (y/N).*y* + +Please enter new destination MAC of interface 3: *68:05:CA:32:0C:38* + +For interface 6, assuming loopback to it's dual interface 7. + +Destination MAC is 68:05:CA:32:0C:39. Change it to MAC of DUT? (y/N).*y* + +Please enter new destination MAC of interface 6: *00:E0:ED:5D:82:D2* + +For interface 7, assuming loopback to it's dual interface 6. + +Destination MAC is 68:05:CA:32:0C:38. Change it to MAC of DUT? (y/N).*y* + +Please enter new destination MAC of interface 7: *00:E0:ED:5D:82:D1* +========================== + +Finally, you can print generated config and save it to file: + +========================== +Print preview of generated config? (Y/n) + +++++ +
### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 4
+  version: 2
+  interfaces: ['05:00.0', '05:00.1', '84:00.0', '84:00.1']
+  port_info:
+      - dest_mac: [0x68, 0x05, 0xca, 0x32, 0x0c, 0x39]
+        src_mac:  [0x00, 0xe0, 0xed, 0x5d, 0x82, 0xd1]
+      - dest_mac: [0x68, 0x05, 0xca, 0x32, 0x0c, 0x38]
+        src_mac:  [0x00, 0xe0, 0xed, 0x5d, 0x82, 0xd2]
+
+      - dest_mac: [0x00, 0xe0, 0xed, 0x5d, 0x82, 0xd2]
+        src_mac:  [0x68, 0x05, 0xca, 0x32, 0x0c, 0x38]
+      - dest_mac: [0x00, 0xe0, 0xed, 0x5d, 0x82, 0xd1]
+        src_mac:  [0x68, 0x05, 0xca, 0x32, 0x0c, 0x39]
+
+  platform:
+      master_thread_id: 0
+      latency_thread_id: 15
+      dual_if:
+        - socket: 0
+          threads: [1,2,3,4,5,6,7] +
+
+        - socket: 1
+          threads: [8,9,10,11,12,13,14]
+
+
+++++ +Save the config to file? (Y/n) + +Default filename is /etc/trex_cfg.yaml + +Press ENTER to confirm or enter new file: + +File /etc/trex_cfg.yaml already exist, overwrite? (y/N)*y* + +Saved. +========================== + + === Some of screenshots of console with commands ==== 64 bytes -- cgit 1.2.3-korg From c0a274d9f94a64648643bca2472c632b216c64e2 Mon Sep 17 00:00:00 2001 From: Anton Kiselev Date: Mon, 17 Oct 2016 18:27:59 +0700 Subject: Updating documentation for scapy_service --- trex_scapy_rpc_server.asciidoc | 731 +++++++++++++++++++++++++++-------------- 1 file changed, 484 insertions(+), 247 deletions(-) diff --git a/trex_scapy_rpc_server.asciidoc b/trex_scapy_rpc_server.asciidoc index e13c63d6..6dce9180 100755 --- a/trex_scapy_rpc_server.asciidoc +++ b/trex_scapy_rpc_server.asciidoc @@ -56,135 +56,500 @@ Error codes are given according to this table: [also follows the JSON-RPC spec, == Data Bases and Data Structures used in Scapy Server -=== Protocol Field Description -This data structure contains the name of the field, its type and the default value assigned. + - + -Has the following structure: + - +=== build_pkt, reconstruct_pkt packet model [[build_pkt_input]] -(field name, field type, default value) + +Following JSON represents a Scapy structure, which can be used to build packet from scratch(build_pkt) or to modify particular fields in the prococol(reconstruct_pkt). Most fields can be omitted, in this case default or calculated values will be used. +For reconstruct_pkt default values will be taken from the original packet. 
+Exaples of JSON payloads and their scapy expression alternatives -*Example:* + -this is the 'dst' field for the 'Ether' protocol -[source,bash] ---- -["dst","MACField","('00:00:00:01:00:00')"] - +Ether(src="de:ad:be:ef:de:ad")/Dot1Q()/Dot1Q(vtype=1)/IP(src="127.0.0.1", chksum="0x312")/TCP(sport=443) ---- - -=== Offsets Dictionary and Offset Entry -==== The *"Offset Entry"* data structure contains the offset of a field within the *layer*, and it's size. + -(both measured in Bytes) - + - + -Has the following structure: + - + -[field offset (within the layer), field size] + - - - -*Example:* + -This is the 'src' field for the 'IP' protocol: + -The offset within the layer is 16 bytes, and the size of the field is 4 bytes (as defined in the IP spec) -[source,bash] ---- -'dst': [16, 4] +[ + { "id": "Ether", "fields": [{"id": "src", "value": "de:ad:be:ef:de:ad"}] }, + { "id": "Dot1Q"}, + { "id": "Dot1Q", "fields": [{"id": "vtype", "value": "1"}] }, + { "id": "IP", "fields": [{"id": "src", "value": "127.0.0.1"}, {"id": "chksum", "value": "0x312"}] }, + { "id": "TCP", "fields": [{"id": "sport", "value": "443"}] } +] ---- -==== The *"Offsets Dictionary"* data sturcture simply maps the offsets for each layer according to name. 
+ -Has the following structure: + - + - 'field name' : [ field offset, field size ] // i.e Offset entry - + - - +=== Scapy server value types +Most values can be passed as strings(including decimal numbers, hex numbers, enums, values), +but for binary payload, value object should be used -*Example:* + -This is the Offsets Dictionary for the IP layer: + -[source,bash] ---- -'offsets': {'IP': {'chksum': [10, 2], - 'dst': [16, 4], - 'flags': [6, 0], - 'frag': [6, 0], - 'global_offset': 0, - 'id': [4, 2], - 'ihl': [0, 0], - 'len': [2, 2], - 'options': [20, 2], - 'proto': [9, 1], - 'src': [12, 4], - 'tos': [1, 1], - 'ttl': [8, 1], - 'version': [0, 0] - } - } +- int/long/str - they can de specified directly as a value of a field +- {"vtype": "BYTES", "base64": "my_payload_base64"} - binary payload passed as base64 +- {"vtype": "EXPRESSION", "expr": "TCPOptions()"} - python expression(normally, should be avoided) +- {"vtype": "UNDEFINED"} - unset field value, and let it be assigned automatically +- {"vtype": "RANDOM"} - assign a random value to a field ---- +Example of object value usage(to specify binary payload) +---- +Ether()/IP()/TCP()/Raw(load=my_payload) +---- -Each layer has a 'global_offset' key. This key represents the *offset of the layer within the packet*. + -In the example above, the IP layer starts at offset 0, and the field 'src' is at offset 12 within the packet. + -In the general case, a field's offset within the *packet* is calculated this way: + - 'global_offset' + 'field_offset' - - -=== Protocol Dictionary -The protocol dictionary contains the names for all supported protocols and layers for building packets. 
+ -Each entry in this database has the following format: + -'Protocol Name' : 'Protocol Field Description' + - + - - -*Example*: + -[source,bash] ---- -{ "Ether":[ - ["dst","MACField","('00:00:00:01:00:00')"], - ["src","MACField","('00:00:00:02:00:00')"], - ["type", "XShortEnumField", "(36864)"] - ], - "ARP":[ - ["hwtype", "XShortField", "(1)"], - ["ptype", "XShortEnumField", "(2048)"], - ["hwlen", "ByteField", "(6)"], - ["plen", "ByteField", "(4)"], - ["op", "ShortEnumField", "(1)"], - ["hwsrc", "ARPSourceMACField", "(None)"], - ["psrc", "SourceIPField", "(None)"], - ["hwdst", "MACField", "(\'00:00:00:00:00:00\')"], - ["pdst", "IPField", "(\'0.0.0.0\')"] - ], - . - . - . - . -} +[ + { "id": "Ether"}, + { "id": "IP"}, + { "id": "TCP"}, + { "id": "Raw", "fields": [ + { + "id": "load", + "value": {"vtype": "BYTES", "base64": "my_payload_base64"} + } + ]} +] ---- -=== Fields Dictionary -The fields dictionary contains mapping between a field's name and its regular expression, + -which has the following structure: + -(field name, field RegEx) + +=== Scapy packet result payload [[build_pkt_output]] +build_pkt and reconstruct pkt take packet model and produce result JSON, +with the binary payload and field values and offsets defined -Example: this is the Regex for the 'MACField' protocol -[source,bash] ---- -{'MACField': '^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$'} +{ + "binary": "AAAAAQAAAAAAAgAACABFAAAoAAEAAEAGOs4QAAABMAAAAQAUAFAAAAAAAAAAAFACIABPfQAA", // base64 encoded binary payload + "data": [ + { + "id": "Ether", // scapy class + "name": "Ethernet", // human-readable protocol name + "offset": 0, // global offset for all fields + "fields": [ + { + "id": "dst", // scapy field id + "hvalue": "00:00:00:01:00:00", // human readable value + "length": 6, // 6 bytes + "offset": 0, // 0 bytes offset from + "value": "00:00:00:01:00:00" // internal value, which for this type is the same as hvalue + }, + { + "id": "src", + ... 
// same as for dst + }, + { + "hvalue": "IPv4", // human-readable value + "id": "type", + "length": 2, + "offset": 12, // + "value": 2048 // integer value for IPv4(0x800) + } + ] + }, + { + "id": "IP", + "name": "IP", + "offset": 14, + "fields": [ + { + "hvalue": "4", + "id": "version", + "length": 0, // the length is 0, which means it is a bitfield. mask should be used to show location + "offset": 0, // offset from the IP.offset. it needs to be added to all fields of IP + "value": 4 + }, + { + "hvalue": "5", + "id": "ihl", + "length": 0, // again length is 0. that's other part of the first byte of IP + "offset": 0, + "value": 5 + }, + { + "hvalue": "0x0", + "id": "tos", + "length": 1, + "offset": 1, + "value": 0 + }, + { + "hvalue": "40", + "id": "len", + "length": 2, + "offset": 2, + "value": 40 + }, + { + "hvalue": "1", + "id": "id", + "length": 2, + "offset": 4, + "value": 1 + }, + { + "hvalue": "", // no flags are specified here. but this field can contain "US" for URG+SYN flags + "id": "flags", + "length": 0, + "offset": 6, + "value": 0 + }, + { + "hvalue": "0", + "id": "frag", + "length": 0, + "offset": 6, + "value": 0 + }, + { + "hvalue": "64", + "id": "ttl", + "length": 1, + "offset": 8, + "value": 64 + }, + { + "hvalue": "tcp", // this field is enum. enum dictionary can be obtained as a medatata for IP fields. + "id": "proto", + "length": 1, + "offset": 9, + "value": 6 + }, + { + "hvalue": "0x3ace", + "id": "chksum", + "length": 2, + "offset": 10, + "value": 15054 + }, + { + "hvalue": "[]", + "id": "options", + "length": 2, + "offset": 20, + "value": { // options can not be representted as a human string, so they are passed as an expression + "expr": "[]", + "vtype": "EXPRESSION" + } + } + ] + }, + { + "id": "TCP", + "name": "TCP", + "offset": 34 + "fields": [ + { + "hvalue": "20", + "id": "sport", + "length": 2, + "offset": 0, + "value": 20 + }, + // .. 
some more TCP fields here + { + "hvalue": "{}", + "id": "options", + "ignored": true, + "length": 2, + "offset": 20, + "value": { // TCPOptions are represented as a python expression with tuple and binary buffers + "expr": "[('MSS', 1460), ('NOP', None), ('NOP', None), ('SAckOK', b'')]", + "vtype": "EXPRESSION" + } + } + ] + } + ] +} + ---- -The dictionary maintains its regular structure: -[source,bash] +=== Scapy server field definitions [[get_definitions_model]] +Scapy server can return metadata object, describing protocols and fields. +Most values, including field types are optional in the definition. +If field type is missing, it can be treated as a STRING. + ---- -{'MACField': '^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$' - 'IPField': 'IP_FIELD_REGEX' - . - . - . +"protocols": [ +{ + "id": "Ether", // scapy class + "name": "Ethernet", // name of the protocol + "fields": [ + { + "id": "dst", + "name": "Destination", // GUI will display Destination instead of dst + "type": "STRING", + "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" + }, + { + "id": "src", + "name": "Source", + "type": "STRING", + "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" + }, + { + "values_dict": { + "ATMMPOA": 34892, + "RAW_FR": 25945, + "DNA_DL": 24577, + "ATMFATE": 34948, + "ATALK": 32923, + "BPQ": 2303, + "X25": 2053, + "PPP_DISC": 34915, + "DEC": 24576, + "n_802_1Q": 33024, + "PPP_SES": 34916, + "TEB": 25944, + "SCA": 24583, + "PPP": 34827, + "FR_ARP": 2056, + "CUST": 24582, + "ARP": 2054, + "DNA_RC": 24578, + "NetBEUI": 33169, + "AARP": 33011, + "DIAG": 24581, + "IPv4": 2048, + "DNA_RT": 24579, + "IPv6": 34525, + "LAT": 24580, + "IPX": 33079, + "LOOP": 36864 + }, + "id": "type", + "name": "Type" + "type": "ENUM" + } + ] +}, +{ + "id": "TCP", + "name": "TCP", + "fields": [ + { + "id": "sport", + "name": "Source port", + "type": "NUMBER", + "min": 0, // optional min value + "max": 65535 // optional max value + + }, + { + "id": "dport", + "name": "Destination port", 
+ "type": "NUMBER", + "min": 0, + "max": 65535 + }, + { + "id": "seq", + "name": "Sequence number", + "type": "NUMBER" + }, + { + "id": "ack", + "name": "Acknowledgment number", + "type": "NUMBER" + }, + { + "id": "dataofs", + "name": "Data offset", + "type": "NUMBER" + }, + { + "id": "reserved", + "name": "Reserved", + "type": "NUMBER" + }, + { + "id": "flags", + "name": "Flags", + "auto": false, + "type": "BITMASK", + "bits": [ // fields definition for the UI + {"name": "URG", "mask": 32, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 32}]}, + {"name": "ACK", "mask": 16, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 16}]}, + {"name": "PSH", "mask": 8, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 8}]}, + {"name": "RST", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]}, + {"name": "SYN", "mask": 2, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 2}]}, + {"name": "FIN", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]} + ] + }, + { + "id": "window", + "name": "Window size", + "type": "NUMBER" + }, + { + "id": "chksum", + "name": "Checksum", + "auto": true, + "type": "NUMBER" + }, + { + "id": "urgptr", + "name": "Urgent pointer", + "type": "NUMBER" + }, + { + "id": "options", + "name": "Options", + "type": "EXPRESSION" + } + ] +}, +{ + "id": "IP", + "name": "Internet Protocol Version 4", + "fields": [ + { + "id": "version", // only renaming + "name": "Version" + }, + { + "id": "ihl", + "name": "IHL", + "type": "NUMBER", + "auto": true // calculate IHL automatically + }, + { + "id": "tos", + "name": "TOS", + "type": "NUMBER" + }, + { + "id": "len", + "name": "Total Length", + "type": "NUMBER", + "auto": true + }, + { + "id": "id", + "name": "Identification", + "type": "NUMBER" + }, + { + "id": "flags", + "name": "Flags", + "type": "BITMASK", + "min": 0, + "max": 8, + "bits": [ // bitmask definition for IP.flags + {"name": 
"Reserved", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]}, + {"name": "Fragment", "mask": 2, "values":[{"name":"May fragment (0)", "value": 0}, {"name":"Don't fragment (1)", "value": 2}]}, + {"name": "More Fragments(MF)", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]} + ] + }, + { + "id": "frag", + "name": "Fragment offset", + "type": "NUMBER" + }, + { + "id": "ttl", + "name": "TTL", + "type": "NUMBER", + "min": 1, + "max": 255 + + }, + { + "id": "proto", + "name": "Protocol" + }, + { + "id": "chksum", + "name": "Checksum", + "type": "STRING", + "auto": true + }, + { + "id": "src", + "name": "Source address", + "type": "STRING", + "regexp": "regexp-to-check-this-field" + }, + { + "id": "dst", + "name": "Destination address", + "regexp": "regexp-to-check-this-field" + }, + { + "id": "options", + "name": "Options", + "type": "EXPRESSION" + } + ] +}, +{ + "id": "Dot1Q", + "name": "802.1Q", + "fields": [ + { + "id": "prio", + "name": "prio" + "type": "NUMBER", + }, + { + "id": "id", + "type": "NUMBER", + "name": "id" + }, + { + "id": "vlan", + "type": "NUMBER", + "name": "vlan" + }, + { + "values_dict": { + "ATMMPOA": 34892, + "RAW_FR": 25945, + "DNA_DL": 24577, + "ATMFATE": 34948, + "ATALK": 32923, + "BPQ": 2303, + "X25": 2053, + "PPP_DISC": 34915, + "DEC": 24576, + "n_802_1Q": 33024, + "PPP_SES": 34916, + "TEB": 25944, + "SCA": 24583, + "PPP": 34827, + "FR_ARP": 2056, + "CUST": 24582, + "ARP": 2054, + "DNA_RC": 24578, + "NetBEUI": 33169, + "AARP": 33011, + "DIAG": 24581, + "IPv4": 2048, + "DNA_RT": 24579, + "IPv6": 34525, + "LAT": 24580, + "IPX": 33079, + "LOOP": 36864 + }, + "id": "type", + "name": "type", + "type": "ENUM" + } + ] +}, +{ + "id": "Raw", + "name": "Raw", + "fields": [ + { + "id": "load", + "name": "Payload", + "type": "BYTES" + } + ] } +] + +] ---- + + == RPC Commands The following RPC commands are supported. Please refer to databases section for elaboration for each database. 
@@ -370,149 +735,21 @@ The following RPC commands are supported. Please refer to databases section for === Build Packet * *Name* - 'build_pkt' -* *Description* - Takes a JSON format string of a SCAPY packet. + -* *Return Value* - See table below -* *Paramters* - string describing SCAPY packet -* *Result* ['dictionary'] - a dictionary that contains: + -* pkt buffer (Hexdump encoded in base64) + -* pkt offsets - each field within the packet has its offset within the layer, and the field size + - the value returned is [ 'field offset' , 'field size' ] + -* pkt show2 - a detailed description of each field and its value - - - -.Object type 'return values for build_pkt' -[options="header",cols="1,1,3"] -|================= -| Field | Type | Description -| pkt buffer | Hexdump encoded in base64 | The packet's dump -| pkt offsets | Dictionary of layers | Each layer contains it's offsets within the layer, and a global offset within the packet -| pkt show2 | Dictionary of layers | Each layer is a dictionary of fields, which contains the values for each field -|================= - -*Example:* + -Successful assembly of a packet: + -[source,bash] ----- - -'Request': - -{ - "id": "1", - "jsonrpc": "2.0", - "method": "build_pkt", - "params": ["Ether()/IP(src='127.0.0.1')/TCP(sport=80)"] -} - -'Response': - -{ - 'id': '1', - 'jsonrpc': '2.0', - 'result': { 'buffer': 'AAAAAQAAAAAAAgAACABFAAAoAAEAAEAGy81/AAABMAAAAQBQAFAAAAAAAAAAAFACIADgQAAA\n', - 'offsets':{ - 'Ether': { - 'dst': [0, 6], - 'global_offset': 0, - 'src': [6, 6], - 'type': [12, 2] - }, - 'IP': { - 'chksum': [10, 2], - 'dst': [16, 4], - 'flags': [6, 0], - 'frag': [6, 0], - 'global_offset': 14, - 'id': [4, 2], - 'ihl': [0, 0], - 'len': [2, 2], - 'options': [20, 2], - 'proto': [9, 1], - 'src': [12, 4], - 'tos': [1, 1], - 'ttl': [8, 1], - 'version': [0, 0] - }, - 'TCP': { - 'ack': [8, 4], - 'chksum': [16, 2], - 'dataofs': [12, 0], - 'dport': [2, 2], - 'flags': [13, 0], - 'global_offset': 34, - 'options': [20, 2], - 
'reserved': [12, 0], - 'seq': [4, 4], - 'sport': [0, 2], - 'urgptr': [18, 2], - 'window': [14, 2] - } - }, - 'show2': { - 'Ethernet': { - 'dst': '00:00:00:01:00:00', - 'src': '00:00:00:02:00:00', - 'type': '0x800' - }, - 'IP': { - 'chksum': '0xcbcd', - 'dst': '48.0.0.1', - 'flags': '', - 'frag': '0L', - 'id': '1', - 'ihl': '5L', - 'len': '40', - 'proto': 'tcp', - 'src': '127.0.0.1', - 'tos': '0x0', - 'ttl': '64', - 'version': '4L' - }, - 'TCP': { - 'ack': '0', - 'chksum': '0xe040', - 'dataofs': '5L', - 'dport': '80', - 'flags': 'S', - 'options': '{}', - 'reserved': '0L', - 'seq': '0', - 'sport': '80', - 'urgptr': '0', - 'window': '8192' - } - } - } -} - - ----- - -Unsuccessful assembly of a packet: + -[source,bash] ----- - -'Request': - -{ - "id": "zweuldlh", - "jsonrpc": "2.0", - "method": "build_pkt", - "params": "ETHER()-IP()" //not a valid SCAPY packet string -} - -'Response': - -{ - 'id': 'zweuldlh', - 'jsonrpc': '2.0', - 'error': { - 'code': -32098, - 'message:': "Scapy Server: unsupported operand type(s) for -: 'Ether' and 'IP'" - } -} - ----- +* *Description* - Builds a new packet from the definition and returns binary data and json structure + +* *Return Value* - Returns xref:build_pkt_output[Scapy packet result payload]. +* *Paramters* - JSON xref:build_pkt_input[packet definition model]. + +=== Create packet from binary data and modify fields +* *Name* - 'reconstruct_pkt' +* *Description* - Builds a new packet from the binary data and returns binary data and json structure + +* *Return Value* - Returns xref:build_pkt_output[Scapy packet result payload]. +* *Paramters* - base64-encoded packet bytes, optional JSON xref:build_pkt_input[packet definition model] with fields to override. + +=== Get protocol definitions +* *Name* - 'get_definitions' +* *Description* - Returns definitions for protocols and fields + +* *Return Value* - array of protocol definitions in a "result.protocols" json. 
xref:get_definitions_model[Output model] +* *Parameters* - array of protocol class names to define or null to fetch metadata for all protocols. ex. ["Ether", "TCP"] === Get protocol tree hierarchy example * *Name* - 'get_tree' -- cgit 1.2.3-korg From 0437a251cb62da595b048a4ff6370b646f3a1fbe Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Tue, 18 Oct 2016 02:06:25 +0300 Subject: typo --- trex_stateless_bench.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trex_stateless_bench.asciidoc b/trex_stateless_bench.asciidoc index d4bd5367..2e0cbf3a 100755 --- a/trex_stateless_bench.asciidoc +++ b/trex_stateless_bench.asciidoc @@ -122,7 +122,7 @@ Printed table with interfaces info: | 4 | 0 | 0a:00.0 | 04:62:73:5f:e8:a8 | I350 Gigabit Network Connection | igb | p4p1 | \*Active* | 5 | 0 | 0a:00.1 | 04:62:73:5f:e8:a9 | I350 Gigabit Network Connection | igb | p4p2 | | 6 | 1 | 84:00.0 | 68:05:CA:32:0C:38 | Device 1583 | igb_uio | | -| 7 | 1 | 84:00.1 | 68:05:CA:32:0C:39 | Device 1583 | igb_uio | |d +| 7 | 1 | 84:00.1 | 68:05:CA:32:0C:39 | Device 1583 | igb_uio | | |================= We will be asked to specify interfaces for TRex usage: 
Read more about ZMQ link:http://zguide.zeromq.org/page:all[here] -image::images/Scapy_JSON_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/scapy_json_rpc_server.png"] +image::images/scapy_json_rpc_server.png[title="Scapy JSON RPC Server",align="left",width=800, link="images/scapy_json_rpc_server.png"] === Error Codes @@ -63,10 +63,12 @@ For reconstruct_pkt default values will be taken from the original packet. Examples of JSON payloads and their scapy expression alternatives +[source,python] ---- Ether(src="de:ad:be:ef:de:ad")/Dot1Q()/Dot1Q(vtype=1)/IP(src="127.0.0.1", chksum="0x312")/TCP(sport=443) ---- +[source,python] ---- [ { "id": "Ether", "fields": [{"id": "src", "value": "de:ad:be:ef:de:ad"}] }, @@ -81,6 +83,7 @@ Ether(src="de:ad:be:ef:de:ad")/Dot1Q()/Dot1Q(vtype=1)/IP(src="127.0.0.1", chksum Most values can be passed as strings(including decimal numbers, hex numbers, enums, values), but for binary payload, value object should be used +[source,python] ---- - int/long/str - they can be specified directly as a value of a field - {"vtype": "BYTES", "base64": "my_payload_base64"} - binary payload passed as base64 @@ -94,6 +97,7 @@ Example of object value usage(to specify binary payload) Ether()/IP()/TCP()/Raw(load=my_payload) ---- +[source,python] ---- [ { "id": "Ether"}, @@ -112,32 +116,33 @@ Ether()/IP()/TCP()/Raw(load=my_payload) build_pkt and reconstruct pkt take packet model and produce result JSON, with the binary payload and field values and offsets defined +[source,python] ---- { "binary": "AAAAAQAAAAAAAgAACABFAAAoAAEAAEAGOs4QAAABMAAAAQAUAFAAAAAAAAAAAFACIABPfQAA", // base64 encoded binary payload "data": [ { - "id": "Ether", // scapy class - "name": "Ethernet", // human-readable protocol name - "offset": 0, // global offset for all fields + "id": "Ether", # scapy class + "name": "Ethernet", # human-readable protocol name + "offset": 0, # global offset for all fields "fields": [ { - "id": "dst", // scapy field id - 
"hvalue": "00:00:00:01:00:00", // human readable value - "length": 6, // 6 bytes - "offset": 0, // 0 bytes offset from - "value": "00:00:00:01:00:00" // internal value, which for this type is the same as hvalue + "id": "dst", # scapy field id + "hvalue": "00:00:00:01:00:00", # human readable value + "length": 6, # 6 bytes + "offset": 0, # 0 bytes offset from + "value": "00:00:00:01:00:00" # internal value, which for this type is the same as hvalue }, { "id": "src", - ... // same as for dst + ... # same as for dst }, { - "hvalue": "IPv4", // human-readable value + "hvalue": "IPv4", # human-readable value "id": "type", "length": 2, - "offset": 12, // - "value": 2048 // integer value for IPv4(0x800) + "offset": 12, # + "value": 2048 # integer value for IPv4(0x800) } ] }, @@ -149,14 +154,14 @@ with the binary payload and field values and offsets defined { "hvalue": "4", "id": "version", - "length": 0, // the length is 0, which means it is a bitfield. mask should be used to show location - "offset": 0, // offset from the IP.offset. it needs to be added to all fields of IP + "length": 0, # the length is 0, which means it is a bitfield. mask should be used to show location + "offset": 0, # offset from the IP.offset. it needs to be added to all fields of IP "value": 4 }, { "hvalue": "5", "id": "ihl", - "length": 0, // again length is 0. that's other part of the first byte of IP + "length": 0, # again length is 0. that's other part of the first byte of IP "offset": 0, "value": 5 }, @@ -182,7 +187,7 @@ with the binary payload and field values and offsets defined "value": 1 }, { - "hvalue": "", // no flags are specified here. but this field can contain "US" for URG+SYN flags + "hvalue": "", # no flags are specified here. but this field can contain "US" for URG+SYN flags "id": "flags", "length": 0, "offset": 6, @@ -203,7 +208,7 @@ with the binary payload and field values and offsets defined "value": 64 }, { - "hvalue": "tcp", // this field is enum. 
enum dictionary can be obtained as a medatata for IP fields. + "hvalue": "tcp", # this field is enum. enum dictionary can be obtained as a medatata for IP fields. "id": "proto", "length": 1, "offset": 9, @@ -221,7 +226,7 @@ with the binary payload and field values and offsets defined "id": "options", "length": 2, "offset": 20, - "value": { // options can not be representted as a human string, so they are passed as an expression + "value": { # options can not be representted as a human string, so they are passed as an expression "expr": "[]", "vtype": "EXPRESSION" } @@ -240,14 +245,14 @@ with the binary payload and field values and offsets defined "offset": 0, "value": 20 }, - // .. some more TCP fields here + # .. some more TCP fields here { "hvalue": "{}", "id": "options", "ignored": true, "length": 2, "offset": 20, - "value": { // TCPOptions are represented as a python expression with tuple and binary buffers + "value": { # TCPOptions are represented as a python expression with tuple and binary buffers "expr": "[('MSS', 1460), ('NOP', None), ('NOP', None), ('SAckOK', b'')]", "vtype": "EXPRESSION" } @@ -264,15 +269,16 @@ Scapy server can return metadata object, describing protocols and fields. Most values, including field types are optional in the definition. If field type is missing, it can be treated as a STRING. +[source,python] ---- "protocols": [ { - "id": "Ether", // scapy class - "name": "Ethernet", // name of the protocol + "id": "Ether", # scapy class + "name": "Ethernet", # name of the protocol "fields": [ { "id": "dst", - "name": "Destination", // GUI will display Destination instead of dst + "name": "Destination", # GUI will display Destination instead of dst "type": "STRING", "regex": "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" }, @@ -326,8 +332,8 @@ If field type is missing, it can be treated as a STRING. 
"id": "sport", "name": "Source port", "type": "NUMBER", - "min": 0, // optional min value - "max": 65535 // optional max value + "min": 0, # optional min value + "max": 65535 # optional max value }, { @@ -362,7 +368,7 @@ If field type is missing, it can be treated as a STRING. "name": "Flags", "auto": false, "type": "BITMASK", - "bits": [ // fields definition for the UI + "bits": [ # fields definition for the UI {"name": "URG", "mask": 32, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 32}]}, {"name": "ACK", "mask": 16, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 16}]}, {"name": "PSH", "mask": 8, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 8}]}, @@ -399,14 +405,14 @@ If field type is missing, it can be treated as a STRING. "name": "Internet Protocol Version 4", "fields": [ { - "id": "version", // only renaming + "id": "version", # only renaming "name": "Version" }, { "id": "ihl", "name": "IHL", "type": "NUMBER", - "auto": true // calculate IHL automatically + "auto": true # calculate IHL automatically }, { "id": "tos", @@ -430,7 +436,7 @@ If field type is missing, it can be treated as a STRING. "type": "BITMASK", "min": 0, "max": 8, - "bits": [ // bitmask definition for IP.flags + "bits": [ # bitmask definition for IP.flags {"name": "Reserved", "mask": 4, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 4}]}, {"name": "Fragment", "mask": 2, "values":[{"name":"May fragment (0)", "value": 0}, {"name":"Don't fragment (1)", "value": 2}]}, {"name": "More Fragments(MF)", "mask": 1, "values":[{"name":"Not Set", "value": 0}, {"name":"Set", "value": 1}]} @@ -563,7 +569,7 @@ The following RPC commands are supported. Please refer to databases section for *Example:* -[source, bash] +[source,python] ---- 'Request': { @@ -610,7 +616,7 @@ The following RPC commands are supported. 
Please refer to databases section for *Example:* -[source,bash] +[source,python] ---- 'Request': { @@ -658,7 +664,7 @@ The following RPC commands are supported. Please refer to databases section for + *Example:* -[source,bash] +[source,python] ---- 'Request': @@ -669,7 +675,7 @@ The following RPC commands are supported. Please refer to databases section for "params": ["md5_of_protocol_db", "md5_of_fields"] } -'Response': //on failure +'Response': #on failure { "jsonrpc": "2.0", @@ -680,7 +686,7 @@ The following RPC commands are supported. Please refer to databases section for } } -'Response': //on success +'Response': #on success { "jsonrpc": "2.0", @@ -706,7 +712,7 @@ The following RPC commands are supported. Please refer to databases section for *Example:* -[source,bash] +[source,python] ---- 'Request': @@ -760,7 +766,7 @@ User can still create non valid hierarchies. (such as Ether()/DNS()/IP()) *Example:* -[source,bash] +[source,python] ---- 'Request': -- cgit 1.2.3-korg From fda47173d794b9730ac2e7069bb7315d3000309e Mon Sep 17 00:00:00 2001 From: Ido Barnea Date: Thu, 13 Oct 2016 17:06:45 +0300 Subject: Adding documentation for ARP feature + major fixes to stateful and getting started Signed-off-by: Ido Barnea --- images/trex-asr-setup.png | Bin 0 -> 42119 bytes images/trex-not-supported-setup.png | Bin 0 -> 59543 bytes release_notes.asciidoc | 7 +- trex_book.asciidoc | 760 ++++++++++++++++++------------------ trex_book_basic.asciidoc | 152 +++----- trex_config.asciidoc | 221 +++++------ trex_faq.asciidoc | 20 +- trex_preso.asciidoc | 5 +- 8 files changed, 569 insertions(+), 596 deletions(-) create mode 100644 images/trex-asr-setup.png create mode 100644 images/trex-not-supported-setup.png diff --git a/images/trex-asr-setup.png b/images/trex-asr-setup.png new file mode 100644 index 00000000..3cc89d76 Binary files /dev/null and b/images/trex-asr-setup.png differ diff --git a/images/trex-not-supported-setup.png b/images/trex-not-supported-setup.png new file mode 
100644 index 00000000..5b0ac3ce Binary files /dev/null and b/images/trex-not-supported-setup.png differ diff --git a/release_notes.asciidoc b/release_notes.asciidoc index 23c8002a..b1cb15e0 100755 --- a/release_notes.asciidoc +++ b/release_notes.asciidoc @@ -23,8 +23,13 @@ ifdef::backend-docbook[] endif::backend-docbook[] -== Release 2.09 == +== Release 2.10 == + +* Added support for IP based configuration files (As opposed to MAC based used until now), with the ability of TRex to send +ARP requests for default gateway, and gratuitous ARP for its own addresses. +See link:trex_manual.html#_configuration_yaml_parameter_of_cfg_option[here] and link:trex_config_guide.html[here] for details. +== Release 2.09 == * Statless, split to core algorithm is more accurate and simple see link:trex_stateless.html#_tutorial_field_engine_split_to_core[split_to_core] * Add repeatable random instruction see an example link:https://github.com/cisco-system-traffic-generator/trex-core/tree/master/scripts/stl/udp_1pkt_repeat_random.py[stl/udp_1pkt_repeat_random.py] link:cp_stl_docs/api/field_engine.html#stlvmflowvarrepetablerandom[repetable_random] and link:trex_rpc_server_spec.html#_repetable_random[repetable_random_spec] diff --git a/trex_book.asciidoc b/trex_book.asciidoc index 79941f87..e3eac9b0 100755 --- a/trex_book.asciidoc +++ b/trex_book.asciidoc @@ -18,20 +18,19 @@ include::trex_ga.asciidoc[] Traditionally, routers have been tested using commercial traffic generators, while performance typically has been measured using packets per second (PPS) metrics. As router functionality and -services have become more complex, stateful traffic generators have become necessary to -provide more realistic application traffic scenarios. +services became more complex, stateful traffic generators now need to provide more realistic traffic scenarios. 
Advantages of realistic traffic generators: -* Accurate performance metrics -* Discovering bottlenecks in realistic traffic scenarios +* Accurate performance metrics. +* Discovering bottlenecks in realistic traffic scenarios. ==== Current Challenges: -* *Cost*: Commercial stateful traffic generators are expensive -* *Scale*: Bandwidth does not scale up well with feature complexity -* *Standardization*: Lack of standardization of traffic patterns and methodologies -* *Flexibility*: Commercial tools do not allow agility when flexibility and changes are needed +* *Cost*: Commercial stateful traffic generators are very expensive. +* *Scale*: Bandwidth does not scale up well with feature complexity. +* *Standardization*: Lack of standardization of traffic patterns and methodologies. +* *Flexibility*: Commercial tools do not allow agility when flexibility and changes are needed. ==== Implications @@ -49,9 +48,9 @@ TRex addresses these problems through an innovative and extendable software impl * Stateful traffic generator based on pre-processing and smart replay of real traffic templates. * Generates and *amplifies* both client and server side traffic. * Customized functionality can be added. -* Scales to 200Gb/sec for one UCS (using Intel 40Gb/sec NICs) -* Low cost -* Self-contained package that can be easily installed and deployed +* Scales to 200Gb/sec for one UCS (using Intel 40Gb/sec NICs). +* Low cost. +* Self-contained package that can be easily installed and deployed. * Virtual interface support enables TRex to be used in a fully virtual environment without physical NICs. Example use cases: ** Amazon AWS ** Cisco LaaS @@ -321,7 +320,7 @@ $[root@trex]lspci | grep Ethernet === Obtaining the TRex package -Connect by `ssh` to the TRex machine and execute the commands described below. +Connect using `ssh` to the TRex machine and execute the commands described below. 
NOTE: Prerequisite: *$WEB_URL* is *{web_server_url}* or *{local_web_server_url}* (Cisco internal) @@ -349,120 +348,112 @@ $wget --no-cache $WEB_URL/release/vX.XX.tar.gz #<1> <1> X.XX = Version number -=== Running TRex for the first time in loopback +== First time Running -Before jumping to check the DUT, you could verify TRex and NICs working in loopback. + -For performance-wise, it's better to connect interfaces on the same NUMA (controlled by one physical processor) + -However, if you have a 10Gb/sec interfaces (based on Intel 520-D2 NICs), and you connect ports that are on the same NIC to each other with SFP+, it might not sync. + -We have checked many SFP+ (Intel/Cisco/SR/LR) and had link. + -If you are still facing this issue you could either try to connect interfaces of different NICs or use link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[Cisco twinax copper cable]. +=== Configuring for loopback + +Before connecting TRex to your DUT, it is strongly advised to verify that TRex and the NICs work correctly in loopback. + +To get best performance, it is advised to loopback interfaces on the same NUMA (controlled by the same physical processor). If you do not know how to check this, you can ignore this advice for now. + + +[NOTE] +===================================================================== +If you are using 10Gbs NIC based on Intel 520-D2 NICs, and you loopback ports on the same NIC, using SFP+, it might not sync, and you will fail to get link up. + +We checked many types of SFP+ (Intel/Cisco/SR/LR) and it worked for us. + +If you still encounter link issues, you can either try to loopback interfaces from different NICs, or use link:http://www.fiberopticshare.com/tag/cisco-10g-twinax[Cisco twinax copper cable]. 
+===================================================================== .Loopback example image:images/loopback_example.png[title="Loopback example"] -If you have a 1Gb/Sec Intel NIC (I350) or XL710/X710 NIC, you can connect any port to any port from loopback perspective *but* first filter the management port - see xref:trex_config[TRex Configuration]. - ==== Identify the ports [source,bash] ---- - $>sudo ./dpdk_setup_ports.py --s + $>sudo ./dpdk_setup_ports.py -s Network devices using DPDK-compatible driver ============================================ Network devices using kernel driver =================================== - 0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active* 0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<1> - 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<2> - 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<3> - 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<4> - + 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb + 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb + 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb + 0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active* #<2> Other network devices ===================== ---- -<1> TRex interface #1 before unbinding -<2> TRex interface #2 before unbinding -<3> TRex interface #3 before unbinding -<4> TRex interface #4 before unbinding -Choose a port to use and follow instructions in the next section to create a configuration file. +<1> If you did not run any DPDK application, you will see list of interfaces binded to the kernel, or not binded at all. +<2> Interface marked as 'active' is the one used by your ssh connection. *Never* put it in TRex config file. 
-==== Create minimum configuration file +Choose ports to use and follow the instructions in the next section to create configuration file. -Create a configuration file: `/etc/trex_cfg.yaml`. +==== Creating minimum configuration file -You can copy a basic configuration file from cfg folder by running this command... +Default configuration file name is: `/etc/trex_cfg.yaml`. + +You can copy basic configuration file from cfg folder [source,bash] ---- $cp cfg/simple_cfg.yaml /etc/trex_cfg.yaml ---- -...and edit the configuration file with the desired values. +Then, edit the configuration file and put your interface's and IP addresses details. Example: [source,bash] ---- -- port_limit : 4 #<1> - version : 2 #<2> - interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] #<3> +- port_limit : 2 + version : 2 +#List of interfaces. Change to suit your setup. Use ./dpdk_setup_ports.py -s to see available options +interfaces : ["03:00.0", "03:00.1"] #<1> + port_info : # Port IPs. Change to suit your needs. In case of loopback, you can leave as is. + - ip : 1.1.1.1 + default_gw : 2.2.2.2 + - ip : 2.2.2.2 + default_gw : 1.1.1.1 ---- -<1> Mumber of ports -<2> Must add version 2 to the configuration file -<3> List of interfaces displayed by `#>sudo ./dpdk_setup_ports.py -s` +<1> You need to edit this line to match the interfaces you are using. +Notice that all NICs you are using should have the same type. You cannot mix different NIC types in one config file. For more info, see link:http://trex-tgn.cisco.com/youtrack/issue/trex-201[trex-201]. -When working with a VM, set the destination MAC of one port as the source or the other for loopback the port in the vSwitch -and you should take the right value from the hypervisor (in case of a physical NIC you can set the MAC address with virtual you can't and you should take it from the hypervisor) -and example +You can find xref:trex_config[here] full list of configuration file options. -// Clarify paragraph above. 
+=== Script for creating config file -[source,python] ---- - - port_limit : 2 - version : 2 - interfaces : ["03:00.0","03:00.1"] <2> - port_info : # set eh mac addr - - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 - src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] <1> - - dest_mac : [0x2,0x0,0x0,0x2,0x0,0x00] # port 1 <1> - src_mac : [0x1,0x0,0x0,0x1,0x0,0x00] ---- -<1> Source MAC is like destination MAC (this should be set or taken from VMware). The MAC was taken from the hypervisor. -<2> Currently TRex supports only one type of NIC at a time. You cannot mix different NIC types in one config file. For more info, see link:http://trex-tgn.cisco.com/youtrack/issue/trex-197[trex-201]. +To help starting with basic configuration file that suits your needs, there is a script that can automate this process. +The script helps you get started, and you can then edit the file and add advanced options from xref:trex_config[here] +if needed. + +There are two ways to run the script. Interactively (script will prompt you for parameters), or providing all parameters +using command line options. -// where can we describe this limitation (TRex supports only one type of NIC at a time. You cannot mix different NIC types in one config file.) and other limitations? - -==== Script for creating config file - -===== Interactive mode +==== Interactive mode [source,bash] ---- sudo ./dpdk_setup_ports.py -i ---- -Will be printed table with all interfaces and related information. + -Then, user is asked to provide desired interfaces, MAC destinations etc. +You will see a list of available interfaces with their related information + +Just follow the instructions to get basic config file. -===== Specifying input arguments from CLI +==== Specifying input arguments using command line options -Another option is to run script with all the arguments given directly from CLI. 
+ -Run this command to see list of all interfaces and related information: +First, run this command to see the list of all interfaces and their related information: [source,bash] ---- sudo ./dpdk_setup_ports.py -t ---- -* In case of *Loopback* and/or only *L1-L2 Switches* on the way, no need to provide destination MACs. + -Will be assumed connection 0↔1, 2↔3 etc. + +* In case of *Loopback* and/or only *L1-L2 Switches* on the way, you do not need to provide IPs or destination MACs. + +The script will assume the following interface connections: 0↔1, 2↔3 etc. + Just run: [source,bash] @@ -470,12 +461,8 @@ Just run: sudo ./dpdk_setup_ports.py -c ... ---- -* In case of *Router* (or other next hop device, such as *L3 Switch*), should be specified MACs of router interfaces as destination. - -[source,bash] ----- -sudo ./dpdk_setup_ports.py -c ... --dest-macs ... ----- +* In case of *Router* (or other next hop device, such as *L3 Switch*), you should specify the TRex IPs and default gateways, or +MACs of the router as described below. .Additional arguments to creating script (dpdk_setup_ports.py -c) [options="header",cols="2,5,3",width="100%"] @@ -484,7 +471,9 @@ sudo ./dpdk_setup_ports.py -c ... --dest-m | -c | Create a configuration file by specified interfaces (PCI address or Linux names: eth1 etc.) | -c 03:00.1 eth1 eth4 84:00.0 | --dump | Dump created config to screen. | | -o | Output the config to this file. | -o /etc/trex_cfg.yaml -| --dest-macs | Destination MACs to be used in created yaml file per each interface. Without specifying the option, will be assumed loopback (0↔1, 2↔3 etc.) | --dest-macs 11:11:11:11:11:11 22:22:22:22:22:22 +| --dest-macs | Destination MACs to be used per each interface. Specify this option if you want MAC based config instead of IP based one. You must not set it together with --ip and --def_gw | --dest-macs 11:11:11:11:11:11 22:22:22:22:22:22 +| --ip | List of IPs to use for each interface. 
If this option and --dest-macs is not specified, script assumes loopback connections (0↔1, 2↔3 etc.) | --ip 1.2.3.4 5.6.7.8 +|--def-gw | List of default gateways to use for each interface. If --ip given, you must provide --def_gw as well | --def-gw 3.4.5.6 7.8.9.10 | --ci | Cores include: White list of cores to use. Make sure there is enough for each NUMA. | --ci 0 2 4 5 6 | --ce | Cores exclude: Black list of cores to exclude. Make sure there will be enough for each NUMA. | --ci 10 11 12 | --no-ht | No HyperThreading: Use only one thread of each Core in created config yaml. | @@ -494,24 +483,65 @@ sudo ./dpdk_setup_ports.py -c ... --dest-m | --ignore-numa | Advanced option: Ignore NUMAs for config creation. Use this option only if you have to, as it might reduce performance. For example, if you have pair of interfaces at different NUMAs | |================= -==== Run TRex +=== Configuring ESXi for running TRex + +To get best performance, it is advised to run TRex on bare metal hardware, and not use any kind of VM. +Bandwidth on VM might be limited, and IPv6 might not be fully supported. +Having said that, there are sometimes benefits for running on VM. +These include: + + * Virtual NICs can be used to bridge between TRex and NICs not supported by TRex. + + * If you already have VM installed, and do not require high performance. + + +1. Click the host machine, enter Configuration -> Networking. + +a. One of the NICs should be connected to the main vSwitch network to get an "outside" connection, for the TRex client and ssh: + +image:images/vSwitch_main.png[title="vSwitch_main"] + +b. Other NICs that are used for TRex traffic should be in a distinct vSwitch: + +image:images/vSwitch_loopback.png[title="vSwitch_loopback"] + +2. 
Right-click guest machine -> Edit settings -> Ensure the NICs are set to their networks: + +image:images/vSwitch_networks.png[title="vSwitch_networks"] + +[NOTE] +===================================================================== +Before version 2.10, the following command did not function as expected: +[subs="quotes"] +.... +sudo ./t-rex-64 -f cap2/dns.yaml *--lm 1 --lo* -l 1000 -d 100 +.... +The vSwitch did not "know" where to route the packet. This was solved in version 2.10, when TRex started to support ARP. +===================================================================== + +* Pass-through is the way to use directly the NICs from host machine inside the VM. Has no limitations except the NIC/hardware itself. The only difference via bare-metal OS is occasional spikes of latency (~10ms). Passthrough settings cannot be saved to OVA. + +1. Click on the host machine. Enter Configuration -> Advanced settings -> Edit. Mark the desired NICs. Reboot the ESXi to apply. + +image:images/passthrough_marking.png[title="passthrough_marking"] + +2. Right click on guest machine. Edit settings -> Add -> *PCI device* -> Choose the NICs one by one. + +image:images/passthrough_adding.png[title="passthrough_adding"] + +=== Configuring for running with router (or other L3 device) as DUT + +You can follow link:trex_config_guide.html[this] presentation for an example of how to configure router as DUT. + +=== Running TRex -Use the following command to begin operation of a 4x 10Gb/sec TRex: +When all is set, use the following command to start basic TRex run for 10 seconds +(it will use the default config file name /etc/trex_cfg.yaml): [source,bash] ---- -$sudo ./t-rex-64 -f cap2/dns.yaml -c 4 -m 1 -d 100 -l 1000 +$sudo ./t-rex-64 -f cap2/dns.yaml -c 4 -m 1 -d 10 -l 1000 ---- -NOTE: For a 10Gb/sec TRex with 2, 6, or 8 ports, add `--limit-ports [number of ports]` *or* follow xref:trex_config[these instructions] to configure TRex. 
- If successful, the output will be similar to the following: [source,python] ---- -$ sudo ./t-rex-64 -f cap2/dns.yaml -d 100 -l 1000 -Starting TRex 1.50 please wait ... +$ sudo ./t-rex-64 -f cap2/dns.yaml -d 10 -l 1000 +Starting TRex 2.09 please wait ... zmq publisher at: tcp://*:4500 - number of ports founded : 4 + number of ports found : 4 port : 0 ------------ link : link : Link Up - speed 10000 Mbps - full-duplex <1> @@ -542,48 +572,46 @@ zmq publisher at: tcp://*:4500 Tx Bw | 217.09 Kbps | 217.14 Kbps | 216.83 Kbps | 216.83 Kbps -Global stats enabled - Cpu Utilization : 0.0 % <12> 29.7 Gb/core <13> + Cpu Utilization : 0.0 % <2> 29.7 Gb/core <3> Platform_factor : 1.0 - Total-Tx : 867.89 Kbps <2> - Total-Rx : 867.86 Kbps <3> + Total-Tx : 867.89 Kbps <4> + Total-Rx : 867.86 Kbps <5> Total-PPS : 1.64 Kpps Total-CPS : 0.50 cps - Expected-PPS : 2.00 pps <9> - Expected-CPS : 1.00 cps <10> - Expected-BPS : 1.36 Kbps <11> + Expected-PPS : 2.00 pps <6> + Expected-CPS : 1.00 cps <7> + Expected-BPS : 1.36 Kbps <8> - Active-flows : 0 <6> Clients : 510 Socket-util : 0.0000 % - Open-flows : 1 <7> Servers : 254 Socket : 1 Socket/Clients : 0.0 - drop-rate : 0.00 bps <8> + Active-flows : 0 <9> Clients : 510 Socket-util : 0.0000 % + Open-flows : 1 <10> Servers : 254 Socket : 1 Socket/Clients : 0.0 + drop-rate : 0.00 bps <11> current time : 5.3 sec test duration : 94.7 sec -Latency stats enabled - Cpu Utilization : 0.2 % <14> + Cpu Utilization : 0.2 % <12> if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window | , , check, , latency(usec),latency (usec) ,(usec) , -------------------------------------------------------------------------------------------------- - 0 | 1002, 1002, 0, 0, 51 , 69, 0 | 0 69 67 <4> - 1 | 1002, 1002, 0, 0, 53 , 196, 0 | 0 196 53 <5> + 0 | 1002, 1002, 0, 0, 51 , 69, 0 | 0 69 67 <13> + 1 | 1002, 1002, 0, 0, 53 , 196, 0 | 0 196 53 2 | 1002, 1002, 0, 0, 54 , 71, 0 | 0 71 69 3 | 1002, 1002, 0, 0, 53 , 193, 0 | 0 193 52 ---- <1> Link must be up 
for TRex to work. -<2> Total Rx must be the same as Tx -<3> Total Rx must be the same as Tx -<4> Tx_ok == Rx_ok -<5> Tx_ok == Rx_ok -<6> Number of TRex active "flows". Could be different than the number of router flows, due to aging issues. Usualy the TRex number of active flows is much lower than that of the router. -<7> Total number of TRex flows opened since startup (including active ones, and ones already closed). -<8> Drop rate. -<9> Expected number of packets per second (calculated without latency packets). -<10> Expected number of connections per second (calculated without latency packets). -<11> Expected number of bits per second (calculated without latency packets). -<12> Average CPU utilization of transmitters threads. For best results it should be lower than 80%. -<13> Gb/sec generated per core of DP. Higher is better. -<14> Rx and latency thread CPU utilization. - +<2> Average CPU utilization of transmitters threads. For best results it should be lower than 80%. +<3> Gb/sec generated per core of DP. Higher is better. +<4> Total Tx must be the same as Rx at the end of the run +<5> Total Rx must be the same as Tx at the end of the run +<6> Expected number of packets per second (calculated without latency packets). +<7> Expected number of connections per second (calculated without latency packets). +<8> Expected number of bits per second (calculated without latency packets). +<9> Number of TRex active "flows". Could be different than the number of router flows, due to aging issues. Usually the TRex number of active flows is much lower than that of the router because the router ages flows slower. +<10> Total number of TRex flows opened since startup (including active ones, and ones already closed). +<11> Drop rate. +<12> Rx and latency thread CPU utilization. +<13> Tx_ok on port 0 should equal Rx_ok on port 1, and vice versa. 
More statistics information: @@ -591,62 +619,16 @@ More statistics information: *Socket/Clients*:: Average of active flows per client, calculated as active_flows/#clients. -*Socket-util*:: Estimate of how many socket ports are used per client IP. This is approximately ~(100*active_flows/#clients)/64K, calculated as (average active flows per client*100/64K). Utilization of more than 50% means that TRex is generating too many flows per single client, and that more clients must be added. +*Socket-util*:: Estimation of number of L4 ports (sockets) used per client IP. This is approximately (100*active_flows/#clients)/64K, calculated as (average active flows per client*100/64K). Utilization of more than 50% means that TRex is generating too many flows per single client, and that more clients must be added in the generator config. // clarify above, especially the formula -*Max window*:: Momentary maximum latency for a time window of 500 msec. There are a few numbers per number of windows that are shown. - The newest number (last 500msec) is on the right. Oldest in the left. This can help identifying spikes of high latency clearing after some time. Maximum latency is the total maximum over the entire test duration. -//clarify above +*Max window*:: Momentary maximum latency for a time window of 500 msec. There are few numbers shown per port. + The newest number (last 500msec) is on the right. Oldest on the left. This can help identifying spikes of high latency clearing after some time. Maximum latency is the total maximum over the entire test duration. To best understand this, + run TRex with latency option (-l) and watch the results with this section in mind. -*Platform_factor*:: There are cases in which we duplicate the traffic using splitter/switch and we would like all numbers displayed to be multiplied by this factor (e.g. 
x2) -//clarify above +*Platform_factor*:: There are cases in which we duplicate the traffic using splitter/switch and we would like all numbers displayed by TRex to be multiplied by this factor, so that TRex counters will match the DUT counters. WARNING: If you don't see rx packets, revisit your MAC address configuration. -//clarify above - -==== Running TRex for the first time with ESXi: - -* Virtual NICs can be used to bridge between TRex and non-supported NICs, or for basic testing. Bandwidth is limited by vSwitch, has IPv6 issues. -// clarify, especially what IPv6 issues - -1. Click the host machine, enter Configuration -> Networking. - -a. One of the NICs should be connected to the main vSwitch network to get an "outside" connection, for the TRex client and ssh: + -image:images/vSwitch_main.png[title="vSwitch_main"] - -b. Other NICs that are used for TRex traffic should be in distinguish vSwitch: + -image:images/vSwitch_loopback.png[title="vSwitch_loopback"] - -2. Right-click guest machine -> Edit settings -> Ensure the NICs are set to their networks: + -image:images/vSwitch_networks.png[title="vSwitch_networks"] - - -[NOTE] -===================================================================== -Current limitation: The following command does not function as expected: -[subs="quotes"] -.... -sudo ./t-rex-64 -f cap2/dns.yaml *--lm 1 --lo* -l 1000 -d 100 -.... -The vSwitch does not "know" where to route the packet. This is expected to be fixed when TRex supports ARP. -===================================================================== - -* Pass-through is the way to use directly the NICs from host machine inside the VM. Has no limitations except the NIC/hardware itself. The only difference via bare-metal OS is occasional spikes of latency (~10ms). Passthrough settings cannot be saved to OVA. - -1. Click on the host machine. Enter Configuration -> Advanced settings -> Edit. Mark the desired NICs. Reboot the ESXi to apply. 
+ -image:images/passthrough_marking.png[title="passthrough_marking"] - -2. Right click on guest machine. Edit settings -> Add -> *PCI device* -> Choose the NICs one by one. + -image:images/passthrough_adding.png[title="passthrough_adding"] - -==== Running TRex for the first time with router - -You can follow this presentation: link:trex_config_guide.html[first time TRex configuration] -or continue reading. -Without config file, TRex sets source MAC of all ports to `00:00:00:01:00:00` and expects to receive packets with this destination MAC address. -So, you just need to configure your router with static ARP entry pointing to the above MAC address. - -NOTE: Virtual routers on ESXi (for example, Cisco CSR1000v) must have distinct MAC address for each port. You need to specify the addresses in the configuration file. see more xref:trex_config[here]. Another example is TRex connected to a switch. In this case, each one of the TRex ports should have distinct MAC address. include::trex_book_basic.asciidoc[] @@ -673,7 +655,7 @@ This requires TRex to send the traffic on two VLANs, as described below. - duration : 0.1 vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } <1> ---- -<1> Enable VLAN feature, valn0==100 , valn1==200 +<1> Enable VLAN feature, vlan0==100 , vlan1==200 *Problem definition:*:: @@ -792,8 +774,8 @@ Support for IPv6 includes: 1. Support for pcap files containing IPv6 packets 2. 
Ability to generate IPv6 traffic from pcap files containing IPv4 packets -The following switch enables this feature: `--ipv6` -Two new keywords (`src_ipv6`, `dst_ipv6`) have been added to the YAML file to specify the most significant 96 bits of the IPv6 address - for example: +The following command line option enables this feature: `--ipv6` +The keywords (`src_ipv6` and `dst_ipv6`) specify the most significant 96 bits of the IPv6 address - for example: [source,python] ---- @@ -802,13 +784,12 @@ Two new keywords (`src_ipv6`, `dst_ipv6`) have been added to the YAML file to sp ---- The IPv6 address is formed by placing what would typically be the IPv4 -address into the least significant 32-bits and copying the value provided -in the src_ipv6/dst_ipv6 keywords into the most signficant 96-bits. -If src_ipv6 and dst_ipv6 are not specified in the YAML file, the default -is to form IPv4-compatible addresses (where the most signifcant 96-bits -are zero). +address into the least significant 32 bits and copying the value provided +in the src_ipv6/dst_ipv6 keywords into the most signficant 96 bits. +If src_ipv6 and dst_ipv6 are not specified, the default +is to form IPv4-compatible addresses (most signifcant 96 bits are zero). -There is a support for all plugins (control flows that needed to be changed). +There is support for all plugins. *Example:*:: [source,bash] @@ -818,7 +799,8 @@ $sudo ./t-rex-64 -f cap2l/sfr_delay_10_1g.yaml -c 4 -p -l 100 -d 100000 -m 30 - *Limitations:*:: -* TRex cannot generate both IPv4 and IPv6 traffic. The `--ipv6` switch must be specified even when using a pcap file containing only IPv6 packets. +* TRex cannot generate both IPv4 and IPv6 traffic. +* The `--ipv6` switch must be specified even when using pcap file containing only IPv6 packets. 
*Router configuration:*:: @@ -856,42 +838,42 @@ asr1k(config)#ipv6 route 5000::/64 3001::2 <1> Enable IPv6 <2> Add pbr <3> Enable IPv6 routing -<4> MAC address setting should be like TRex +<4> MAC address setting. Should be TRex MAC. <5> PBR configuraion === Client clustering configuration -Trex supports testing a complex topology by a feature called "client clustering". -This feature allows a more detailed clustering of clients. +TRex supports testing complex topologies, using a feature called "client clustering". +This feature allows more detailed clustering of clients. -Let's assume the following topology: +Let's look at the following topology: image:images/client_clustering_topology.png[title="Client Clustering"] We would like to configure two clusters and direct traffic to them. -Using a config file, you can instruct TRex to generate clients +Using config file, you can instruct TRex to generate clients with specific configuration per cluster. -A cluster configuration includes: +Cluster configuration includes: -* IP start range -* IP end range -* Initator side configuration -* Responder side configuration +* IP start range. +* IP end range. +* Initiator side configuration. +* Responder side configuration. [NOTE] -It is important to state that this is *complimentry* to the client generator +It is important to understand that this is *complementary* to the client generator configured per profile - it only defines how the generator will be clustered. -Let's take a look at an example: +Let's look at an example. -We have a profile which defines a client generator: +We have a profile defining client generator. 
[source,bash] ---- -$more cap2/dns.yaml +$cat cap2/dns.yaml - duration : 10.0 generator : distribution : "seq" @@ -904,7 +886,6 @@ $more cap2/dns.yaml dual_port_mask : "1.0.0.0" tcp_aging : 1 udp_aging : 1 - mac : [0x00,0x00,0x00,0x01,0x00,0x00] cap_info : - name: cap2/dns.pcap cps : 1.0 @@ -913,11 +894,10 @@ $more cap2/dns.yaml w : 1 ---- -We would like to create two clusters of 4 devices each. -We would also like to divide *80%* of the traffic to the upper cluster -and *20%* to the lower cluster. +We want to create two clusters with 4 devices each. +We also want to divide *80%* of the traffic to the upper cluster and *20%* to the lower cluster. -We create a cluster configuration file in YAML: +We will create the following cluster configuration file. [source,bash] ---- @@ -925,29 +905,29 @@ We create a cluster configuration file in YAML: # Client configuration example file # The file must contain the following fields # -# 'vlan' - is the entire configuration under VLAN -# if so, each client group must include vlan +# 'vlan' - if the entire configuration uses VLAN, +# each client group must include vlan # configuration # -# 'groups' - each client group must contain a range of IP -# and initiator and responder maps -# 'count' represents the number of MAC devices -# on the group. +# 'groups' - each client group must contain range of IPs +# and initiator and responder section +# 'count' represents the number of different MACs +# addresses in the group. 
# # initiator and responder can contain 'vlan', 'src_mac', 'dst_mac' # # each group contains a double way VLAN configuration -vlan: true <1> +vlan: true groups: -- ip_start : 16.0.0.1 <2> +- ip_start : 16.0.0.1 ip_end : 16.0.0.204 - initiator : <3> + initiator : vlan : 100 dst_mac : "00:00:00:01:00:00" - responder : <4> + responder : vlan : 200 dst_mac : "00:00:00:01:00:00" @@ -968,16 +948,16 @@ groups: ---- The above configuration will divide the generator range of 255 clients to two clusters, -where each has 4 devices and VLAN on both ways. +each with 4 devices and VLAN in both directions. -MACs will be allocated incrementaly with a wrap around. +MACs will be allocated incrementaly, with a wrap around. e.g. * 16.0.0.1 --> 00:00:00:01:00:00 * 16.0.0.2 --> 00:00:00:01:00:01 -* 16.0.0.3 --> 00:00:00:01:00:03 -* 16.0.0.4 --> 00:00:00:01:00:04 +* 16.0.0.3 --> 00:00:00:01:00:02 +* 16.0.0.4 --> 00:00:00:01:00:03 * 16.0.0.5 --> 00:00:00:01:00:00 * 16.0.0.6 --> 00:00:00:01:00:01 @@ -987,7 +967,7 @@ and so on. [source,bash] ---- -sudo ./t-rex-64 -f cap2/dns.yaml --client_cfg my_cfg.yaml -c 4 -d 100 +sudo ./t-rex-64 -f cap2/dns.yaml --client_cfg my_cfg.yaml ---- === NAT support @@ -1023,11 +1003,11 @@ This mode can give much better connections per second performance than mode 1 (s $sudo ./t-rex-64 -f cap2/http_simple.yaml -c 4 -l 1000 -d 100000 -m 30 --learn-mode 1 ---- -*SFR traffic without bundeling/ALG support* +*SFR traffic without bundling/ALG support* [source,bash] ---- -$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 100000 -m 10 --learn-mode 2 +$sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundling.yaml -c 4 -l 1000 -d 100000 -m 10 --learn-mode 2 ---- *NAT terminal counters:*:: @@ -1051,7 +1031,7 @@ $sudo ./t-rex-64 -f avl/sfr_delay_10_1g_no_bundeling.yaml -c 4 -l 1000 -d 10000 *Configuration for Cisco ASR1000 Series:*:: -This feature was tested with the following configuration and sfr_delay_10_1g_no_bundeling. yaml traffic profile. 
+This feature was tested with the following configuration and sfr_delay_10_1g_no_bundling. yaml traffic profile. Client address range is 16.0.0.1 to 16.0.0.255 [source,python] @@ -1093,25 +1073,27 @@ access-list 8 permit 17.0.0.0 0.0.0.255 *Limitations:*:: -. The IPv6-IPv6 NAT feature does not exist on routers, so this feature can work on IPv4 only. +. The IPv6-IPv6 NAT feature does not exist on routers, so this feature can work only with IPv4. . Does not support NAT64. -. Bundling/plugin support is not fully supported. Consequently, sfr_delay_10.yaml does not work. Use sfr_delay_10_no_bundeling.yaml instead. -// verify file name "sfr_delay_10_no_bundeling.yaml" above. english spelling is bundling but maybe the filename has the "e" +. Bundling/plugin is not fully supported. Consequently, sfr_delay_10.yaml does not work. Use sfr_delay_10_no_bundling.yaml instead. [NOTE] ===================================================================== * `--learn-verify` is a TRex debug mechanism for testing the TRex learn mechanism. -* If the router is configured without NAT, it will verify that the inside_ip==outside_ip and inside_port==outside_port. +* Need to run it when DUT is configured without NAT. It will verify that the inside_ip==outside_ip and inside_port==outside_port. ===================================================================== === Flow order/latency verification -In normal mode (without this feature enabled), received traffic is not checked by software. Hardware (Intel NIC) testin for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. +In normal mode (without this feature enabled), received traffic is not checked by software. Hardware (Intel NIC) testing for dropped packets occurs at the end of the test. The only exception is the Latency/Jitter packets. This is one reason that with TRex, you *cannot* check features that terminate traffic (for example TCP Proxy). 
To enable this feature, add `--rx-check ` to the command line options, where is the sample rate. The number of flows that will be sent to the software for verification is (1/(sample_rate). For 40Gb/sec traffic you can use a sample rate of 1/128. Watch for Rx CPU% utilization. - INFO: This feature changes the TTL of the sampled flows to 255 and expects to receive packets with TTL 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop is possible if there are number of routers betwean TRex client side and TRex server side. +[NOTE] +============ +This feature changes the TTL of the sampled flows to 255 and expects to receive packets with TTL 254 or 255 (one routing hop). If you have more than one hop in your setup, use `--hops` to change it to a higher value. More than one hop is possible if there are number of routers betwean TRex client side and TRex server side. +============ This feature ensures that: @@ -1201,7 +1183,6 @@ Cpu Utilization : 0.1 % *Notes and Limitations:*:: -** This feature must be enabled with a latency check (`-l`). ** To receive the packets TRex does the following: *** Changes the TTL to 0xff and expects 0xFF (loopback) or oxFE (route). (Use `--hop` to configure this value.) *** Adds 24 bytes of metadata as ipv4/ipv6 option header. @@ -1235,7 +1216,7 @@ Cpu Utilization : 0.1 % mac_override_by_ip : true <8> ---- <1> Test duration (seconds). Can be overridden using the `-d` option. -<2> See the generator section. +<2> See the link:trex_manual.html#_clients_servers_ip_allocation_scheme[generator] section. // what does note 2 mean? see somewhere else? isn't this simply the generator section? <3> Default source/destination MAC address. The configuration YAML can override this. <4> true (default) indicates that the IPG is taken from the cap file (also taking into account cap_ipg_min and cap_override_ipg if they exist). 
false indicates that IPG is taken from per template section. @@ -1273,55 +1254,62 @@ Cpu Utilization : 0.1 % anchor:trex_config[] The configuration file, in YAML format, configures TRex behavior, including: - -- MAC address for each port (source and destination) -- Masking interfaces (usually for 1Gb/Sec TRex) to ensure that TRex does not take the management ports as traffic ports. +- IP address or MAC address for each port (source and destination). +- Masked interfaces, to ensure that TRex does not try to use the management ports as traffic ports. - Changing the zmq/telnet TCP port. -==== Basic Configuration - -Copy/install the configuration file to `/etc/trex_cfg.yaml`. -TRex loads it automatically at startup. You still can override options with the command line option switch `--cfg [file]` in the CLI -Configuration file examples can be found in the `$ROOT/cfg` folder +You specify which config file to use by adding --cfg to the command line arguments. + +If no --cfg given, the default `/etc/trex_cfg.yaml` is used. + +Configuration file examples can be found in the `$TREX_ROOT/scripts/cfg` folder. 
+==== Basic Configurations [source,python] ---- - - port_limit : 2 #mandatory <1> - version : 2 #mandatory <2> - interfaces : ["03:00.0","03:00.1"] #mandatory <3> - #enable_zmq_pub : true <4> - #zmq_pub_port : 4500 <5> - #prefix : setup1 <6> - #limit_memory : 1024 <7> - c : 4 <8> - port_bandwidth_gb : 10 <9> - port_info : # set eh mac addr mandatory - - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0 <10> - src_mac : [0x2,0x0,0x0,0x2,0x0,0x00] - - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1 - src_mac : [0x4,0x0,0x0,0x4,0x0,0x00] - - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2 - src_mac : [0x6,0x0,0x0,0x6,0x0,0x00] - - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3 - src_mac : [0x0,0x0,0x0,0x8,0x0,0x02] - - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4 ----- -<1> The number of ports, should be equal to the number of interfaces in 3) - mandatory -<2> Must be set to 2 - mandatory -<3> Interface that should be used. used `sudo ./dpdk_setup_ports.py --show` - mandatory + - port_limit : 2 #mandatory <1> + version : 2 #mandatory <2> + interfaces : ["03:00.0", "03:00.1"] #mandatory <3> + #enable_zmq_pub : true #optional <4> + #zmq_pub_port : 4500 #optional <5> + #prefix : setup1 #optional <6> + #limit_memory : 1024 #optional <7> + c : 4 #optional <8> + port_bandwidth_gb : 10 #optional <9> + port_info : # set eh mac addr mandatory + - default_gw : 1.1.1.1 # port 0 <10> + dest_mac : '00:00:00:01:00:00' # Either default_gw or dest_mac is mandatory <10> + src_mac : '00:00:00:02:00:00' # optional <11> + ip : 2.2.2.2 # optional <12> + vlan : 15 # optional <13> + - dest_mac : '00:00:00:03:00:00' # port 1 + src_mac : '00:00:00:04:00:00' + - dest_mac : '00:00:00:05:00:00' # port 2 + src_mac : '00:00:00:06:00:00' + - dest_mac : [0x0,0x0,0x0,0x7,0x0,0x01] # port 3 <14> + src_mac : [0x0,0x0,0x0,0x8,0x0,0x02] # <14> +---- +<1> Number of ports. Should be equal to the number of interfaces listed in 3. - mandatory +<2> Must be set to 2. - mandatory +<3> List of interfaces to use. 
Run `sudo ./dpdk_setup_ports.py --show` to see the list you can choose from. - mandatory <4> Enable the ZMQ publisher for stats data, default is true. -<5> ZMQ port number. the default value is good. you can remove this line - -<6> The name of the setup should be distinct ( DPDK --file-prefix ) -<7> DPDK -m limit the packet memory -<8> Number of threads per dual interface ( like -c CLI option ) -<9> The bandwidth of each interface in Gb/sec. In this example we have 10Gb/sec interfaces. for VM put 1. it used to tune the amount of memory allocated by TRex. -<10> MAC address per port - source and destination. - - -To find out what the interfaces ids, perform the following: +<5> ZMQ port number. Default value is good. If running two TRex instances on the same machine, each should be given distinct number. Otherwise, can remove this line. +<6> If running two TRex instances on the same machine, each should be given distinct name. Otherwise, can remove this line. ( Passed to DPDK as --file-prefix arg) +<7> Limit the amount of packet memory used. (Passed to dpdk as -m arg) +<8> Number of threads (cores) TRex will use per interface pair ( Can be overridden by -c command line option ) +<9> The bandwidth of each interface in Gbs. In this example we have 10Gbs interfaces. For VM, put 1. Used to tune the amount of memory allocated by TRex. +<10> TRex needs to know the destination MAC address to use on each port. You can specify this in one of two ways: + +Specify dest_mac directly. + +Specify default_gw (since version 2.10). In this case (only if no dest_mac given), TRex will issue ARP request to this IP, and will use +the result as dest MAC. If no dest_mac given, and no ARP response received, TRex will exit. + +<11> Source MAC to use when sending packets from this interface. If not given (since version 2.10), MAC address of the port will be used. +<12> If given (since version 2.10), TRex will issue gratuitous ARP for the ip + src MAC pair on appropriate port. 
In stateful mode, +gratuitous ARP for each ip will be sent every 120 seconds (Can be changed using --arp-refresh-period argument) +<13> If given, gratuitous ARP and ARP request will be sent using the given VLAN tag. +<14> Old MAC address format. New format is supported since version v2.09. + +To find out which interfaces (NIC ports) can be used, perform the following: [source,bash] ---- @@ -1332,21 +1320,18 @@ To find out what the interfaces ids, perform the following: Network devices using kernel driver =================================== - 0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active* - 0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<1> - 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<2> - 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<3> - 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<4> + 0000:02:00.0 '82545EM Gigabit Ethernet Controller' if=eth2 drv=e1000 unused=igb_uio *Active* #<1> + 0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb #<2> + 0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb + 0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb + 0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv= unused=ixgb Other network devices ===================== ---- -<1> TRex interface #1 before unbinding -<2> TRex interface #2 before unbinding -<3> TRex interface #3 before unbinding -<4> TRex interface #4 before unbinding - +<1> We see that 02:00.0 is active (our management port). +<2> All other NIC ports (03:00.0, 03:00.1, 13:00.0, 13:00.1) can be used. 
minimum configuration file is: @@ -1354,16 +1339,15 @@ minimum configuration file is: ---- - port_limit : 4 - version : 2 #<1> - interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] #<2> + version : 2 + interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] ---- -<1> must add version 2 to the configuration file -<2> The list of interfaces from `sudo ./dpdk_setup_ports.py --show` - ==== Memory section configuration -The memory section is optional. It is used when there is a need to tune the amount of memory used by packet manager +The memory section is optional. It is used when there is a need to tune the amount of memory used by TRex packet manager. +Default values (from the TRex source code), are usually good for most users. Unless you have some unusual needs, you can +eliminate this section. [source,python] ---- @@ -1386,11 +1370,16 @@ The memory section is optional. It is used when there is a need to tune the amou dp_flows : 1048576 <4> global_flows : 10240 <5> ---- -<1> Memory section -<2> Per dual interfaces number of buffers - buffer for real time traffic generation -<3> Traffic buffer - when you have many template only this section should be enlarge -<4> number of TRex flows needed -<5> reserved +<1> Memory section header +<2> Numbers of memory buffers allocated for packets in transit, per port pair. Numbers are specified per packet size. +<3> Numbers of memory buffers allocated for holding the part of the packet which is remained unchanged per template. +You should increase numbers here, only if you have very large amount of templates. +<4> Number of TRex flow objects allocated (To get best performance they are allocated upfront, and not dynamically). +If you expect more concurrent flows than the default (1048576), enlarge this. +<5> Number objects TRex allocates for holding NAT ``in transit'' connections. In stateful mode, TRex learn NAT +translation by looking at the address changes done by the DUT to the first packet of each flow. 
So, these are the +number of flows for which TRex sent the first flow packet, but did not learn the translation yet. Again, default +here (10240) should be good. Increase only if you use NAT and see issues. ==== Platform section configuration @@ -1401,36 +1390,32 @@ a configuration file now has the folowing struct to support multi instance [source,python] ---- - version : 2 - interfaces : ["03:00.0","03:00.1"] + interfaces : ["03:00.0","03:00.1"] port_limit : 2 - prefix : setup1 <1> - limit_memory : 1024 <2> - c : 4 <3> - port_bandwidth_gb : 10 <4> - platform : <5> - master_thread_id : 0 <6> - latency_thread_id : 5 <7> - dual_if : - - socket : 0 <8> - threads : [1,2,3,4] <9> ----- -<1> The name of the setup should be distinct ( DPDK --file-prefix ) -<2> DPDK -m -<3> Number of threads per dual interface ( like -c CLI option ) -<4> The bandwidth of each interface in Gb/sec. In this example we have 10Gb/sec interfaces. for VM put 1. it used to tune the amount of memory allocated by TRex. -<5> the platform section -<6> The thread_id for control -<7> The thread_id for latency if used -<8> Socket of the dual interfaces, in this example of 03:00.0 and 03:00.1, memory should be local to the interface. (Currently dual interface can't use 2 NUMAs.) -<9> Thread to be used, should be local to the NIC. The threads are pinned to cores, thus specifying threads is like specifying cores. +.... + platform : <1> + master_thread_id : 0 <2> + latency_thread_id : 5 <3> + dual_if : <4> + - socket : 0 <5> + threads : [1,2,3,4] <6> +---- +<1> Platform section header. +<2> Hardware thread_id for control thread. +<3> Hardware thread_id for RX thread. +<4> ``dual_if'' section defines info for interface pairs (according to the order in ``interfaces'' list). +each section, starting with ``- socket'' defines info for different interface pair. +<5> The NUMA node from which memory will be allocated for use by the interface pair. 
+<6> Hardware threads to be used for sending packets for the interface pair. Threads are pinned to cores, so specifying threads +actually determines the hardware cores. *Real example:* anchor:numa-example[] -We've connected 2 Intel XL710 NICs close to each other on motherboard, they shared same NUMA: +We connected 2 Intel XL710 NICs close to each other on the motherboard. They shared the same NUMA: image:images/same_numa.png[title="2_NICSs_same_NUMA"] -The CPU utilization was very high ~100%, with c=2 and c=4 the results were same. +CPU utilization was very high ~100%, with c=2 and c=4 the results were same. Then, we moved the cards to different NUMAs: @@ -1455,102 +1440,117 @@ This gave best results: with *\~98 Gb/s* TX BW and c=7, CPU utilization became * anchor:cml-line[] -*-f=TRAFIC_YAML_FILE*:: - Traffic YAML configuration file. +*--allow-coredump*:: +Allow creation of core dump. + +*--arp-refresh-period *:: +Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means ``never send``. + +*-c *:: +Number of hardware threads to use per interface pair. Use at least 4 for TRex 40Gbs. + +TRex uses 2 threads for inner needs. Rest of the threads can be used. Maximum number here, can be number of free threads +divided by number of interface pairs. + +For virtual NICs on VM, we always use one thread per interface pair. + +*--cfg *:: +TRex configuration file to use. See relevant manual section for all config file options. -*-c=CORES*:: - Number of cores _per dual interface_. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. + - TRex uses 2 cores for inner needs, the rest of cores can be used divided by number of dual interfaces. + - For virtual NICs the limit is -c=1. +*--checksum-offload*:: +Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this. -*-l=HZ*:: - Run the latency daemon in this Hz rate. Example: -l 1000 runs 1000 pkt/sec from each interface. 
A value of zero (0) disables the latency check. +*--client_cfg *:: +YAML file describing clients configuration. Look link:trex_manual.html#_client_clustering_configuration[here] for details. -*-d=DURATION*:: - Duration of the test (sec), Default: 0 +*-d *:: +Duration of the test in seconds. -*-m=MUL*:: - Factor for bandwidth (multiply the CPS of each template by this value). +*-e*:: + Same as `-p`, but change the src/dst IP according to the port. Using this, you will get all the packets of the + same flow from the same port, and with the same src/dst IP. + + It will not work good with NBAR as it expects all clients ip to be sent from same direction. + +*-f *:: +Specify traffic YAML configuration file to use. Mandatory option for stateful mode. + +*--hops *:: + Provide number of hops in the setup (default is one hop). Relevant only if the Rx check is enabled. + Look link:trex_manual.html#_flow_order_latency_verification[here] for details. + +*--iom *:: + I/O mode. Possible values: 0 (silent), 1 (normal), 2 (short). *--ipv6*:: - Convert template to IPv6 mode. + Convert templates to IPv6 mode. + +*-k *:: + Run ``warm up'' traffic for num seconds before starting the test. This is needed if TRex is connected to switch running + spanning tree. You want the switch to see traffic from all relevant source MAC addresses before starting to send real + data. Traffic sent is the same used for the latency test (-l option) + + Current limitation (holds for TRex version 1.82): does not work properly on VM. -*--learn-mode *:: - Learn the dynamic NAT translation. + - 1 - Use TCP ACK in first SYN to pass NAT translation information. Will work only for TCP streams. Initial SYN packet must be present in stream. + - 2 - Add special IP option to pass NAT translation information. Will not work on certain firewalls if they drop packets with IP options. +*-l *:: + In parallel to the test, run latency check, sending packets at rate/sec from each interface. 
+ +*--learn-mode *:: + Learn the dynamic NAT translation. Look link:trex_manual.html#_nat_support[here] for details. *--learn-verify*:: - Learn the translation. This feature is intended for verification of the mechanism in cases where there is no NAT. - -*-p*:: - Flow-flip. Sends all flow packets from the same interface. This can solve the flow order. Does not work with any router configuration. + Used for testing the NAT learning mechanism. Do the learning as if DUT is doing NAT, but verify that packets + are not actually changed. -*-e*:: - same as `-p` but comply to the direction rules and replace source/destination IPs. it might not be good for NBAR as it is expected clients ip to be sent from same direction. +*--limit-ports *:: + Limit the number of ports used. Overrides the ``port_limit'' from config file. -//TBD: The last 2 sentences (flow order, router configuration) are unclear. +*--lm *:: +Mask specifying which ports will send traffic. For example, 0x1 - Only port 0 will send. 0x4 - only port 2 will send. +This can be used to verify port connectivity. You can send packets from one port, and look at counters on the DUT. - -*--lm=MASK*:: - Latency mask. Use this to verify port connectivity. Possible values: 0x1 (only port 0 will send traffic), 0x2 (only port 1 will send traffic). +*--lo*:: + Latency only - Send only latency packets. Do not send packets from the templates/pcap files. -*--lo*:: - Latency test. - -*--limit-ports=PORTS*:: - Limit number of ports. Configure this in the --cfg file. Possible values (number of ports): 2, 4, 6, 8. (Default: 4) - -*--nc*:: - If set, will terminate exacly at the end of the duration. This provides a faster, more accurate TRex termination. In default it wait for all the flow to terminate gracefully. In case of a very long flow the termination might be prolong. +*-m *:: + Rate multiplier. TRex will multiply the CPS rate of each template by num. + +*--mac-spread*:: + Spread the destination mac by this this factor. 
e.g 2 will generate the traffic to 2 devices DEST-MAC ,DEST-MAC+1. The maximum is up to 128 devices. + +*--nc*:: + If set, will terminate exactly at the end of the specified duration. + This provides faster, more accurate TRex termination. + By default (without this option), TRex waits for all flows to terminate gracefully. In case of a very long flow, termination might be prolonged. + +*--no-flow-control-change*:: + Prevents TRex from changing flow control. By default (without this option), TRex disables flow control at startup for all cards, except for the Intel XL710 40G card. + +*--no-key*:: Daemon mode, don't get input from keyboard. + +*--no-watchdog*:: Disable watchdog. -*-pm=MULTIFLIER*:: - Platform factor. If the setup includes a splitter, you can multiply the total results by this factor. Example: --pm 2.0 will multiply all bps results by this factor. +*-p*:: +Send all packets of the same flow from the same direction. For each flow, TRex will randomly choose between client port and +server port, and send all the packets from this port. src/dst IPs keep their values as if packets are sent from two ports. +Meaning, we get on the same port packets from client to server, and from server to client. + +If you are using this with a router, you cannot rely on routing rules to pass traffic to TRex, you must configure policy +based routes to pass all traffic from one DUT port to the other. + + +*-pm *:: + Platform factor. If the setup includes a splitter, you can multiply all statistic numbers displayed by TRex by this factor, so that they will match the DUT counters. *-pubd*:: - Disable ZMQ monitor's publishers. + Disable ZMQ monitor's publishers. -*-1g*:: - Deprecated. Configure TRex to 1G. Configure this in the --cfg file. - -*-k=SEC*:: - Run a latency test before starting the test. TRex will wait for x sec before and after sending latency packets at startup. - Current limitation (holds for TRex version 1.82): does not work properly on VM. 
+*--rx-check *:: + Enable Rx check module. Using this, each thread randomly samples 1/sample_rate of the flows and checks packet order, latency, and additional statistics for the sampled flows. + Note: This feature works on the RX thread. -*-w=SEC*:: - Wait additional time between NICs initialization and sending traffic. Can be useful if DUT needs extra setup time. Default is 1 second. - -*--cfg=platform_yaml*:: - Load and configure platform using this file. See example file: cfg/cfg_examplexx.yaml - This file is used to configure/mask interfaces, cores, affinity, and MAC addresses. - You can use the example file by copying it to: /etc/trex_cfg.yaml - - -*-v=VERBOSE*:: - Verbose mode (works only on the debug image! ) - 1 Show only stats. - 2 Run preview. Does not write to file. - 3 Run preview and write to stats file. - Note: When using verbose mode, it is not necessary to add an output file. - Caution: Operating in verbose mode can generate very large files (terabytes). Use with caution, only on a local drive. - - -*--rx-check=SAMPLE_RATE*:: - Enable Rx check module. Using this each thread samples flows (1/sample) and checks order, latency, and additional statistics. - Note: This feature operates as an additional thread. - -*--hops=HOPES*:: - Number of hops in the setup (default is one hop). Relevant only if the Rx check is enabled. - -*--iom=MODE*:: - I/O mode for interactive mode. Possible values: 0 (silent), 1 (normal), 2 (short) - -*--no-flow-control-change*:: - Prevents TRex from changing flow control. By default (without this option), TRex disables flow control at startup for all cards, except for the Intel XL710 40G card. +*-v *:: + Show debug info. Value of 1 shows debug info on startup. Value of 3, shows debug info during run at some cases. Might slow down operation. -*--mac-spread*:: - Spread the destination mac by this this factor. e.g 2 will generate the traffic to 2 devices DEST-MAC ,DEST-MAC+1. The maximum is up to 128 devices. 
+*--vlan*:: Relevant only for stateless mode with Intel 82599 10G NIC. + When configuring flow stat and latency per stream rules, assume all streams uses VLAN. +*-w *:: + Wait additional time between NICs initialization and sending traffic. Can be useful if DUT needs extra setup time. Default is 1 second. ifndef::backend-docbook[] @@ -1561,8 +1561,8 @@ endif::backend-docbook[] === Simulator -The TRex simulator is a linux application that can process on any Linux CEL (it can run on TRex itself). -you can create create output pcap file from input of traffic YAML. +The TRex simulator is a linux application (no DPDK needed) that can run on any Linux (it can also run on TRex machine itself). +you can create output pcap file from input of traffic YAML. ==== Simulator diff --git a/trex_book_basic.asciidoc b/trex_book_basic.asciidoc index 5c8af732..9e376366 100755 --- a/trex_book_basic.asciidoc +++ b/trex_book_basic.asciidoc @@ -8,13 +8,13 @@ This simulator can be run on any Cisco Linux including on the TRex itself. TRex simulates clients and servers and generates traffic based on the pcap files provided. .Clients/Servers -image:images/trex_model.png[title="generator"] +image:images/trex_model.png[title=""] The following is an example YAML-format traffic configuration file (cap2/dns_test.yaml), with explanatory notes. [source,python] ---- -$more cap2/dns_test.yaml +$cat cap2/dns_test.yaml - duration : 10.0 generator : distribution : "seq" @@ -27,7 +27,6 @@ $more cap2/dns_test.yaml dual_port_mask : "1.0.0.0" tcp_aging : 1 udp_aging : 1 - mac : [0x00,0x00,0x00,0x01,0x00,0x00] cap_info : - name: cap2/dns.pcap <3> cps : 1.0 <4> @@ -43,7 +42,7 @@ $more cap2/dns_test.yaml <6> Should be the same as ipg. 
.DNS template file -image:images/dns_wireshark.png[title="generator"] +image:images/dns_wireshark.png[title=""] The DNS template file includes: @@ -109,6 +108,7 @@ gives //TBD: Not sure what the output looks like here, with this line showing only "gives" .TRex generated output file +//??? missing picture image:images/dns_trex_run.png[title="generator"] As the output file shows... @@ -245,7 +245,6 @@ Use the following to display the output as a chart, with: x axis: time (seconds) y axis: flow ID The output indicates that there are 10 flows in 1 second, as expected, and the IPG is 50 msec + -//TBD: not sure what the "+ +" means ==> [hh] Ascii Doc break page ifndef::backend-docbook[] +++++++++++++++++++++++++++++++++ @@ -514,7 +513,7 @@ you need to be sure that you have enogth free sockets when running TRex in high Open-flows : 1 Servers : 254 Socket : 1 Socket/Clients : 0.0 drop-rate : 0.00 bps ---- -<1> Number of clients +<1> Number of clients <2> sockets utilization (should be lowwer than 20%, elarge the number of clients in case of an issue). === DNS, W=1 @@ -577,7 +576,7 @@ pkt,time sec,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc |================= -=== Mixing HTTP and DNS template +=== Mixing HTTP and DNS templates The following example combines elements of HTTP and DNS templates: @@ -2621,32 +2620,15 @@ chart("#chart4",sfr_data,sfr_names,"time-sec","flow-id"); endif::backend-docbook[] -=== TRex command line +=== Running examples -TRex commands typically include the following main arguments, but only `-f` and `-d` are required. +TRex commands typically include the following main arguments, but only `-f` is required. [source,bash] ---- -$.sudo /t-rex-64 -f [traffic_yaml] -m [muti] -d [duration] -l [Hz=1000] -c [cores] +$.sudo /t-rex-64 -f -m -d -l -c ---- - -*-f=TRAFIC_YAML_FILE*:: - YAML traffic configuration file. - -*-m=MUL*:: - Factor for bandwidth (multiplies the CPS of each template by this value). - -*-d=DURATION*:: - Duration of the test (sec). 
Default: 0 - -*-l=HZ*:: - Rate (Hz) for running the latency daemon. Example: -l 1000 runs 1000 pkt/sec from each interface. A value of zero (0) disables the latency check. - -*-c=CORES*:: - Number of cores. Use 4 for TRex 40Gb/sec. Monitor the CPU% of TRex - it should be ~50%. - - -The full reference can be found xref:cml-line[here] +Full command line reference can be found xref:cml-line[here] ==== TRex command line examples @@ -2723,11 +2705,11 @@ $.sudo /t-rex-64 -f cap2/imix_64.yaml -c 4 -m 1 -d 100 -l 1000 === Mimicking stateless traffic under stateful mode [NOTE] -TRex now supports a true stateless traffic generation. +TRex supports also true stateless traffic generation. If you are looking for stateless traffic, please visit the following link: xref:trex_stateless.html[TRex Stateless Support] With this feature you can "repeat" flows and create stateless, *IXIA* like streams. -After injecting the number of flows defined by `limit`, TRex repeats the same flows. If all template has a `limit` the CPS will be zero after a time as there are no new flows after the first iteration. +After injecting the number of flows defined by `limit`, TRex repeats the same flows. If all templates have `limit` the CPS will be zero after some time as there are no new flows after the first iteration. *IMIX support:*:: Example: @@ -2739,9 +2721,9 @@ $sudo ./t-rex-64 -f cap2/imix_64.yaml -d 1000 -m 40000 -c 4 -p [WARNING] ===================================================================== The *-p* is used here to send the client side packets from both interfaces. -(Normally it is sent only from client ports only.) -Typically, the traffic client side is sent from the TRex client port; with this option, the port is selected by the client IP. -All the flow packets are sent from the same interface. This may create an issue with routing, as the client's IP will be sent from the server interface. PBR router configuration solves this issue but cannot be used in all cases. 
So use this `-p` option carefully. +(Normally it is sent from client ports only.) +With this option, the port is selected by the client IP. +All the packets of a flow are sent from the same interface. This may create an issue with routing, as the client's IP will be sent from the server interface. PBR router configuration solves this issue but cannot be used in all cases. So use this `-p` option carefully. ===================================================================== @@ -2817,7 +2799,7 @@ It is possible to mix stateless templates and stateful templates. w : 1 limit : 199 ---- -The templates are duplicate here to better utilize DRAM and to get better performance. +The templates are duplicated here to better utilize DRAM and to get better performance. //TBD: What exactly repeates the templates - TRex, script, ? Also, how does that better utilize DRAM. .Imix YAML `cap2/imix_fast_1g_100k_flows.yaml` example @@ -3001,8 +2983,8 @@ endif::backend-docbook[] === Clients/Servers IP allocation scheme -Currently, there is one global IP pool for clients and servers. It serves all templates. all the templates will allocate IP from this global pool. -Each TRex client/server "dual-port" (pair of ports, such as port 0 for client, port 1 for server) has it own mask offset taken from the YAML. The mask offset is called `dual_port_mask`. +Currently, there is one global IP pool for clients and servers. It serves all templates. All templates will allocate IP from this global pool. +Each TRex client/server "dual-port" (pair of ports, such as port 0 for client, port 1 for server) has its own generator offset, taken from the config file. The offset is called `dual_port_mask`. Example: @@ -3018,37 +3000,32 @@ generator : tcp_aging : 0 udp_aging : 0 ---- -<1> Mask to add per dual-port pair. -The reason we introduce dual_port_mask is to make static route configurable. With this mask, different ports has different prefix. +<1> Offset to add per port pair. 
+The reason for the ``dual_port_mask'' is to make static route configuration per port possible. With this offset, different ports have different prefixes. -//TBD: needs clarification - this is the format of a port mask? - -With four ports, TRex produces the following output: +For example, with four ports, TRex will produce the following ip ranges: [source,python] ---- - dual-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) - dual-1 (2,3) --> C (17.0.0.129-17.0.0.255 ) <-> S( 49.0.0.129 - 49.0.0.255) + mask ("1.0.0.0") + port pair-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) + port pair-1 (2,3) --> C (17.0.0.129-17.0.0.255 ) <-> S( 49.0.0.129 - 49.0.0.255) + mask ("1.0.0.0") ---- -In the case of setting dual-port_mask as 0.0.0.0, both ports will use the same range of ip. -With four ports and dual_port_mask as 0.0.0.0, the ip range is : +- Number of clients : 255 +- Number of servers : 255 +- The offset defined by ``dual_port_mask'' (1.0.0.0) is added for each port pair, but the total number of clients/servers will remain constant (255), and will not depend on the amount of ports. +- TCP/UDP aging is the time it takes to return the socket to the pool. It is required when the number of clients is very small and the template defines a very long duration. +//TBD: not clear - is TCP/UDP aging an option used when the template defines a long duration? also, should specify what "very long" refers to. + +If ``dual-port_mask'' was set to 0.0.0.0, both port pairs would have uses the same ip range. 
+For example, with four ports, we would get the following IP ranges: [source,python] ---- - dual-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) - dual-1 (2,3) --> C (16.0.0.129-16.0.0.255 ) <-> S( 48.0.0.129 - 48.0.0.255) + port pair-0 (0,1) --> C (16.0.0.1-16.0.0.128 ) <-> S( 48.0.0.1 - 48.0.0.128) + port pair-1 (2,3) --> C (16.0.0.129-16.0.0.255 ) <-> S( 48.0.0.129 - 48.0.0.255) ---- -//TBD: not clear what the following 5 points are referring to. This looks like it should be a continuation of the footnotes for the example a few lines up. -- Number of clients : 255 -- Number of servers : 255 -- The mask defined by dual_port_mask (1.0.0.0) is added for each dual-port pair, but the total number of clients/servers from YAML will be constant and does not depend on the amount of dual ports. -- TCP/UDP aging is required when the number of clients is very small and the template defines a very long duration. -This is the time it takes to return the socket to the pool. -//TBD: not clear - is TCP/UDP aging an option used when the template defines a long duration? also, should specify what "very long" refers to. -- In the current version, the only option for distribution is "seq". - *Router configuration for this mode:*:: @@ -3092,8 +3069,8 @@ ip route 49.0.0.0 255.0.0.0 33.11.11.12 *One server:*:: -To support a template with one server, you can add a new YAML server_addr ip. Each dual-port pair will be assigned a separate server (in compliance with the mask). -//TBD: clarify +To support a template with one server, you can add the ``server_addr'' keyword. Each port pair will get a different server IP +(According to the ``dual_port_mask'' offset). [source,python] ---- @@ -3106,16 +3083,13 @@ To support a template with one server, you can add a new YAML server_addr ip. Ea one_app_server : true <2> wlength : 1 ---- -<1> Server IPv4 address. +<1> Server IP. <2> Enable one server mode. 
-*w/wlength:*:: -//TBD: looks like this should be a continuation of the footnotes as in 1 and 2 above. +// TBD - what is wlength??? -not require to configure them, user 1 -//TBD: ? - -*new statistic:*:: +In TRex server, you will see the following statistics. +// TBD - need to explain this [source,python] ---- @@ -3127,21 +3101,20 @@ not require to configure them, user 1 [NOTE] ===================================================================== * No backward compatibility with the old generator YAML format. -* When using -p option, TRex will not comply with the static route rules. Server-side traffic may be sent from the client side (port 0) and vice-versa. Use the -p option only with PBR configuration when the router, switch p1<->p2. -//TBD: "when router..." unclear -* VLAN (sub interface feature) does not comply with static route rules. Use it only with PBR. - VLAN0 <-> VALN1 per interface - vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 } -* Limitation: When using a template with plugins (bundles), the number of servers must be higher than the number of clients. +* When using -p option, TRex will not comply with the static route rules. Server-side traffic may be sent from the client side (port 0) and vice-versa. +If you use the -p option, you must configure policy based routing to pass all traffic from router port 1 to router port 2, and vice versa. +* xref:trex_vlan[VLAN] feature does not comply with static route rules. If you use it, you also need policy based routing +rules to pass packets from VLAN0 to VLAN1 and vice versa. +* Limitation: When using template with plugins (bundles), the number of servers must be higher than the number of clients. ===================================================================== ==== More Details about IP allocations -Each time a new flow is creaed, TRex allocates a new Client IP/port and Server IP. This 3-tuple should be distinct among active flows. 
+Each time a new flow is created, TRex allocates new Client IP/port and Server IP. This 3-tuple should be distinct among active flows. -Currently, only sequcency distribution is supported in IP allocation. That means the IP address is increased one by one. +Currently, only sequential distribution is supported in IP allocation. This means the IP address is increased by one for each flow. -Let's say if we have 2 candidate IPs in the pool: 16.0.0.1 and 16.0.0.2. So the sequence of allocated clients should be something like this: +For example, if we have a pool of two IP addresses: 16.0.0.1 and 16.0.0.2, the allocation of client src/port pairs will be [source,python] ---- @@ -3149,22 +3122,25 @@ Let's say if we have 2 candidate IPs in the pool: 16.0.0.1 and 16.0.0.2. So the 16.0.0.0.2 [1024] 16.0.0.0.1 [1025] 16.0.0.0.2 [1025] +16.0.0.0.1 [1026] +16.0.0.0.2 [1026] +... ---- -==== How to decide the PPS and BPS +==== How to determine the packet per second(PPS) and Bit per second (BPS) -- Example of one flow with 4 packets -- Green are first packet of flow -- Lets say the client ip pool starts from 16.0.0.1, and the distribution is seq. +- Let's look at an example of one flow with 4 packets. +- Green circles represent the first packet of each flow. +- The client ip pool starts from 16.0.0.1, and the distribution is seq. -image:images/ip_allocation.png[title="rigt"] +image:images/ip_allocation.png[title=""] latexmath:[$Total PPS = \sum_{k=0}^{n}(CPS_{k}\times {flow\_pkts}_{k})$] latexmath:[$Concurrent flow = \sum_{k=0}^{n}CPS_{k}\times flow\_duration_k $] +// TBD Ido: The latexmath formulas only looks good in pdf format. In HTML they are not clear. - -The above fomulars can be used to calculate the PPS. The TRex throughput depends on the PPS calculated above and the value of m (a multiplier assigned by TRex cli). +The above formulas can be used to calculate the PPS. 
The TRex throughput depends on the PPS calculated above and the value of m (a multiplier given as command line argument -m). The m value is a multiplier of total pcap files CPS. CPS of pcap file is configured on yaml file. @@ -3193,7 +3169,7 @@ So if the m is set as 1, the total PPS is : 102*2+50*20 = 1204 PPS. The BPS depends on the packet size. You can refer to your packet size and get the BPS = PPS*Packet_size. -==== Client/Server IP allocation +==== Per template allocation + future plans - *1) per-template generator* @@ -3246,13 +3222,13 @@ The YAML configuration is something like this: w : 1 ---- -- *2) More distributions will be supported (normal distribution, random distribution, etc)* +- *2) More distributions will be supported in the future (normal distribution for example)* Currently, only sequcence and random are supported. - *3) Histogram of tuple pool will be supported* -This feature gives user more flexibility to define the IP generator. +This feature will give the user more flexibility in defining the IP generator. [source,python] ---- @@ -3282,9 +3258,9 @@ This feature gives user more flexibility to define the IP generator. -=== Measure Jitter/Latency +=== Measuring Jitter/Latency -To measure jitter/latency on high priorty packets (one SCTP or ICMP flow), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. +To measure jitter/latency using independent flows (SCTP or ICMP), use `-l [Hz]` where Hz defines the number of packets to send from each port per second. This option measures latency and jitter. We can define the type of traffic used for the latency measurement using the `--l-pkt-mode` option. @@ -3296,12 +3272,12 @@ This option measures latency and jitter. We can define the type of traffic used | 1 | ICMP echo request packets from both sides | 2 | -*Stateful*, send ICMP requests from one side, and matching ICMP responses from other side. 
+Send ICMP requests from one side, and matching ICMP responses from other side. This is particulary usefull if your DUT drops traffic from outside, and you need to open pin hole to get the outside traffic in (for example when testing a firewall) | 3 | -send ICMP request packets with a constant 0 sequence number. +Send ICMP request packets with a constant 0 sequence number from both sides. |================= diff --git a/trex_config.asciidoc b/trex_config.asciidoc index 42f21b62..ca68bbe7 100755 --- a/trex_config.asciidoc +++ b/trex_config.asciidoc @@ -1,10 +1,10 @@ TRex first time configuration ============================= -:author: hhaim with the Help of Amir Kroparo +:author: hhaim with the Help of Amir Kroparo. New rev fixes by Ido Barnea. :email: :description: TRex Getting started - instalation guide :revdate: 2014-11-01 -:revnumber: 0.1 +:revnumber: 0.2 :deckjs_theme: swiss :deckjs_transition: horizontal-slide :scrollable: @@ -94,181 +94,169 @@ html, body { $('#title-slide').css("background-position","center"); $('h1').html(''); $('h3').html('Hanoch Haim '); - $('h4').html('04/2015'); + $('h4').html('Updated 10/2016'); ++++++++++++++++++ +== General info +* This guide will help you configure Cisco ASR1K as DUT connected to TRex running in stateful mode. +* This can be easily adopted for working with any L3 device. Equivalent commands for configuring Linux as your DUT are shown at the end as well. +* Two options are given for configuring the router. Policy based route, and static route. You should +choose the one appropriate for your needs. +* TRex should be directly connected to ASR1K ports, and will act as both client and server. -== Simple configuration +== Setup description -* TRex does not implement ARP emulation -* This guide will help you to configure Cisco ASR1K to work with TRex -* TRex is directly connected to ASR1K ports. 
+* TRex will emulate the networks described in the figure below (on each side of the DUT, router connected to one or more clients/servers networks). -image::images/TrexConfig.png[title="TRex/Router setup"] -. TRex port 0 - Client side -. Router TenG 0/0/0 -. Router TenG 0/0/1 -. TRex port 1 - Server side - +image::images/trex-asr-setup.png[title="TRex/Router setup"] + +== Not supported setup description + +* Notice that the following setup is *not* supported (Having TRex emulate a bunch of hosts connected by switch to the DUT). +This means that the TRex IP addresses defined in ``generator'' section should be in different network then the DUT addresses +and TRex addresses defined in port_info section. + +image::images/trex-not-supported-setup.png[title="Not supported setup"] == TRex configuration -* TRex act as both client and server side -* TRex port mac address should configure correctly, so packet generated from port 1 will get to 2 and vice-versa -* To use the config file you can add this switch `--cfg [file]` -* Or edit the configuration file in `/etc/trex_cfg.yaml` +* You can specify config file to use by the `--cfg` command line argument +or use the default config file `/etc/trex_cfg.yaml` +* Below is an example of how to configure TRex IP addresses. TRex will issue ARP for default_gw, +and send gratuitous ARP for ip, on each port. This works, starting from TRex version 2.10. +If you want to configure MAC addresses manually (equivalent to static +ARP), or running older TRex version, please see the full description of config file parameters in the manual. 
[source,python] ---- - - port_limit : 2 - port_info : # set eh mac addr - - dest_mac : [0x0,0x0,0x0,0x1,0x0,0x00] <1> - src_mac : [0x0,0x0,0x0,0x2,0x0,0x00] <2> - - dest_mac : [0x0,0x0,0x0,0x3,0x0,0x00] <3> - src_mac : [0x0,0x0,0x0,0x4,0x0,0x00] <4> + - port_limit : 2 + port_info : + - default_gw : 11.11.11.1 <1> + ip : 11.11.11.2 <2> + - default_gw : 12.12.12.1 <3> + ip : 12.12.12.2 <4> ---- -<1> Correspond to TRex port 0 - should be Router TenG 0/0/0 mac-address -<2> Should be distinc mac-address, router should be configure to sent to this mac-address -<3> Correspond to TRex port 1 - should be Router TenG 0/0/1 mac-address -<4> Should be distinc mac-address, router should be configure to sent to this mac-address +<1> TRex port 0 config- should be router's TenG 0/0/0 IP. +TRex will try to resolve this address by sending ARP request. +<2> Next hop of router's TenG 0/0/0. TRex will send gratuitous ARP for this address. +<3> TRex port 1 config- should be router's TenG 0/0/1 IP. +TRex will try to resolve this address by sending ARP request. +<4> Next hop of router's TenG 0/0/0. TRex will send gratuitous ARP for this address. + +== TRex emulated server/client IPs definition in traffic config file -== Router configuration PBR part 1 +* You specify traffic config file by running TRex with -f (TRex stateful mode). +* Examples for client config files exist in TREX_ROOT/scripts/cfg directory. +* Add following section to the traffic config file, to define the range of IPs for clients and servers. -* Router moves packets from port 0->1 and 1->0 without looking into IP address. +[source,python] +---- +generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.0.240" +---- -* TenG 0/0/0 <-> TenG 0/0/1 +* In this example, there are: +** 255 clients talking to 240 servers -*Router configuration:*:: +== Router config. 
Option 1 - static routes [source,python] ---- interface TenGigabitEthernet0/0/0 - mac-address 0000.0001.0000 <1> - mtu 4000 <2> - ip address 11.11.11.11 255.255.255.0 <3> - ip policy route-map p1_to_p2 <4> - load-interval 30 + ip address 11.11.11.1 255.255.255.0 ! - +` interface TenGigabitEthernet0/0/1 - mac-address 0000.0003.0000 <5> - mtu 4000 - ip address 12.11.11.11 255.255.255.0 - ip policy route-map p2_to_p1 - load-interval 30 + ip address 12.12.12.1 255.255.255.0 ! +ip route 16.0.0.0 255.0.0.0 11.11.11.2 <1> +ip route 48.0.0.0 255.0.0.0 12.12.12.2 <2> ---- -<1> Configure mac-address to match TRex destination port-0 -<2> Set MTU -<3> Set an ip address ( routing can't work without this) -<4> Configure PBR policy - see next slide -<5> Configure mac-address to match TRex destination port-1 +<1> Route clients network to TRex server emulation interface. +<2> Route servers network to TRex client emulation interface. -== Router configuration PBR part 2 +== Router config. Option 2 - PBR part 1 + +* Router is configured to statically route packets from 0/0/0 to 0/0/1 and from 0/0/1 to 0/0/0. + +*Router configuration:*:: [source,python] ---- - -route-map p1_to_p2 permit 10 - set ip next-hop 12.11.11.12 <1> +interface TenGigabitEthernet0/0/0 + ip address 11.11.11.1 255.255.255.0 <1> + ip policy route-map p1_to_p2 <2> + load-interval 30 ! -route-map p2_to_p1 permit 10 - set ip next-hop 11.11.11.12 <2> +interface TenGigabitEthernet0/0/1 + ip address 12.12.12.1 255.255.255.0 <1> + ip policy route-map p2_to_p1 <2> + load-interval 30 +! ---- +<1> Configure ip address for the port. +<2> Configure PBR policy - see next slide -<1> Set the destination packet to be 12.11.11.12 which correspond to TenG 0/0/1 -<2> Set the destination packet to be 11.11.11.12 which correspond to TenG 0/0/0 - - -== Router configuration PBR part 3 - -* What about destination mac-address it should be TRex source mac-address? -* The folowing configuration address it +== Router config. 
Option 2 - PBR part 2 [source,python] ---- - arp 11.11.11.12 0000.0002.0000 ARPA <1> - arp 12.11.11.12 0000.0004.0000 ARPA <2> ----- -<1> Destination mac-address of packets sent from If 0/0/0 is matched to TRex source mac-address port-0 -<2> Destination mac-address of packets sent from If 0/0/1 is matched to TRex source mac-address port-1 -== Static-route configuration - TRex - -* You can set static range of IPs for client and server side +route-map p1_to_p2 permit 10 + set ip next-hop 12.12.12.2 <1> +! +route-map p2_to_p1 permit 10 + set ip next-hop 11.11.11.2 <2> -[source,python] ----- -generator : - distribution : "seq" - clients_start : "16.0.0.1" - clients_end : "16.0.0.255" - servers_start : "48.0.0.1" - servers_end : "48.0.0.240" - dual_port_mask : "1.0.0.0" - tcp_aging : 0 - udp_aging : 0 ---- -* In this example, you should expect: -** Number of clients 255 -** Number of servers 240 +<1> Set the destination to be 12.12.12.2, in the subnet of TenG 0/0/1. +<2> Set the destination to be 11.11.11.2 , in the subnet to TenG 0/0/0. -== Static-route configuration - Router +== Verify cable connections -[source,python] ----- -interface TenGigabitEthernet0/0/0 - mac-address 0000.0001.0000 - mtu 4000 - ip address 11.11.11.11 255.255.255.0 -! -` -interface TenGigabitEthernet0/0/1 - mac-address 0000.0003.0000 - mtu 4000 - ip address 22.11.11.11 255.255.255.0 -! -ip route 16.0.0.0 255.0.0.0 11.11.11.12 <1> -ip route 48.0.0.0 255.0.0.0 22.11.11.12 <2> ----- -<1> Match the range of TRex YAML ( client side 0/0/0 ) -<2> Match the range of TRex YAML ( server side 0/0/1) - -== Verify configuration - -* To verify that TRex port-0 is connected to Router 0/0/0 and not 0/0/1 run +* To verify that TRex port-0 is really connected to Router 0/0/0, you can run the following. ........................................... -$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 1 +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 10 -l 1000 --lo --lm 1 ........................................... 
* It sends packets only from TRex port-0 ( `--lm 1` ) * to send only from TRex port 1 do this: ........................................... -$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 --lo --lm 2 +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 10 -l 1000 --lo --lm 2 ........................................... -* In case you are connected to a Switch you must send packet from both direction first +* If you are connected to a switch, you must send packets from both directions for few seconds first, to allow +the switch to learn the MAC addresses of both sides. ........................................... -$./t-rex-64 -f cap2/dns.yaml -m 1 -d 100 -l 1000 +$./t-rex-64 -f cap2/dns.yaml -m 1 -d 10 -l 1000 ........................................... +== Linux config + +* Assuming the same setup with Linux as DUT instead of the router, you can do the following. +* Configure IPs of Linux interfaces to 12.12.12.1 and 11.11.11.1 +* route add -net 48.0.0.0 netmask 255.0.0.0 gw 12.12.12.2 +* route add -net 16.0.0.0 netmask 255.0.0.0 gw 11.11.11.2 -== Static-route configuration - IPV6 + +== Static route configuration - IPV6 [source,python] ---- interface TenGigabitEthernet1/0/0 - mac-address 0000.0001.0000 - mtu 4000 - ip address 11.11.11.11 255.255.255.0 + ip address 11.11.11.1 255.255.255.0 ip policy route-map p1_to_p2 load-interval 30 ipv6 enable #<1> @@ -300,8 +288,3 @@ csi-mcp-asr1k-40(config)#ipv6 route 5000::/64 3001::2 <5> Mac-addr setting should be like TRex <6> PBR configuraion - - - - - diff --git a/trex_faq.asciidoc b/trex_faq.asciidoc index 169b04be..44f4f237 100644 --- a/trex_faq.asciidoc +++ b/trex_faq.asciidoc @@ -197,13 +197,21 @@ Then, you can find some basic examples link:trex_manual.html#_trex_command_line[ A switch might be configured with spanning tree enabled. TRex reset the port at startup, making the switch reset it side as well, and spanning tree can drop the packets until it stabilizes. Disabling spanning tree can help. 
On Cisco nexus, you can do that using `spanning-tree port type edge` -You can also start Cisco with -k flag. This will send packets for k seconds before starting the actual test, letting the spanning +You can also start TRex with -k flag. This will send packets for k seconds before starting the actual test, letting the spanning tree time to stabilize. This issue will be fixed when we consolidate ``Stateful'' and ``Stateless'' RPC. -==== I can not see RX packets -TRex does not support ARP yet, you should configure the DUT to send the packets to the TRex port MAC address. From Stateless mode, you can change the port mode to promiscuous. + -Also, revisit your MAC address configuration in the TRex config file. Wrong MAC address configuration will cause all packets to be dropped. +==== I can not see any RX packets. +Most common reason is problems with MAC addresses. +If your ports are connected in loopback, follow link:trex_manual.html#_configuring_for_loopback[this] carefully. + +If loopback worked for you, continue link:trex_manual.html#_configuring_for_running_with_router_or_other_l3_device_as_dut[here]. + +If you set MAC addresses manually in your config file, check again that they are correct. + +If you have ip and default_gw in your config file, you can debug the initial ARP resolution process by running TRex with +-d 1 flag (will stop TRex 1 second after init phase, so you can scroll up and look at init stage log), and -v 1. +This will dump the result of ARP resolution (``dst MAC:...''). You can also try -v 3. +This will print more debug info, and also ARP packets TX/RX indication and statistics. + +On the DUT side - If you configured static ARP, verify it is correct. If you depend on TRex gratuitous ARP messages, look at the DUT ARP +table after TRex init phase and verify its correctness. ==== Why the performance is low? @@ -342,12 +350,12 @@ Q:: I want to use the Python API via Java (with Jython), apparently, I can not i The way I see it I have two options: 1. 
Creating python scripts and call them from java (with ProcessBuilder for example) -2. Call directly to the Trex server over RPC from Java +2. Call directly to the TRex server over RPC from Java However, option 2 seems like a re-writing the API for Java (which I am not going to do) On the other hand, with option 1, once the script is done, the client object destroyed and I cannot use it anymore in my tests. -Any ideas on what is the best way to use Trex within JAVA? +Any ideas on what is the best way to use TRex within JAVA? A:: diff --git a/trex_preso.asciidoc b/trex_preso.asciidoc index c89bca09..46ec91f8 100755 --- a/trex_preso.asciidoc +++ b/trex_preso.asciidoc @@ -110,6 +110,7 @@ html, body { $('h1').html(''); $('h3').html('Hanoch Haim v1.2'); $('h4').html('04/2015'); + $('h5').html('Updated 10/2016'); + + +
+ + + +
+

Slide

+
+ +
+

Content

+
+ +
+

Here

+
+ + + + + + +
+ + +
+ + +

+ + / + +

+ + +
+ + + + +
+ + +
+ + + + + + + + + + + + + + + + diff --git a/backends/deckjs/deck.js/core/deck.core.css b/backends/deckjs/deck.js/core/deck.core.css new file mode 100644 index 00000000..da619cb0 --- /dev/null +++ b/backends/deckjs/deck.js/core/deck.core.css @@ -0,0 +1,60 @@ +html, body { + height: 100%; + padding: 0; + margin: 0; +} + +.deck-container { + position: relative; + min-height: 100%; + margin: 0 auto; + overflow: hidden; + overflow-y: auto; +} +.js .deck-container { + visibility: hidden; +} +.ready .deck-container { + visibility: visible; +} +.touch .deck-container { + -webkit-text-size-adjust: none; + -moz-text-size-adjust: none; +} + +.deck-loading { + display: none; +} + +.slide { + width: auto; + min-height: 100%; + position: relative; +} + +.deck-before, .deck-previous, .deck-next, .deck-after { + position: absolute; + left: -999em; + top: -999em; +} + +.deck-current { + z-index: 2; +} + +.slide .slide { + visibility: hidden; + position: static; + min-height: 0; +} + +.deck-child-current { + position: static; + z-index: 2; +} +.deck-child-current .slide { + visibility: hidden; +} +.deck-child-current .deck-previous, .deck-child-current .deck-before, .deck-child-current .deck-current { + visibility: visible; +} diff --git a/backends/deckjs/deck.js/core/deck.core.js b/backends/deckjs/deck.js/core/deck.core.js new file mode 100644 index 00000000..a8adefe7 --- /dev/null +++ b/backends/deckjs/deck.js/core/deck.core.js @@ -0,0 +1,748 @@ +/*! +Deck JS - deck.core +Copyright (c) 2011-2014 Caleb Troughton +Dual licensed under the MIT license. +https://github.com/imakewebthings/deck.js/blob/master/MIT-license.txt +*/ + +/* +The deck.core module provides all the basic functionality for creating and +moving through a deck. It does so by applying classes to indicate the state of +the deck and its slides, allowing CSS to take care of the visual representation +of each state. 
It also provides methods for navigating the deck and inspecting +its state, as well as basic key bindings for going to the next and previous +slides. More functionality is provided by wholly separate extension modules +that use the API provided by core. +*/ +(function($, undefined) { + var slides, currentIndex, $container, $fragmentLinks; + + var events = { + /* + This event fires at the beginning of a slide change, before the actual + change occurs. Its purpose is to give extension authors a way to prevent + the slide change from occuring. This is done by calling preventDefault + on the event object within this event. If that is done, the deck.change + event will never be fired and the slide will not change. + */ + beforeChange: 'deck.beforeChange', + + /* + This event fires whenever the current slide changes, whether by way of + next, prev, or go. The callback function is passed two parameters, from + and to, equal to the indices of the old slide and the new slide + respectively. If preventDefault is called on the event within this handler + the slide change does not occur. + + $(document).bind('deck.change', function(event, from, to) { + alert('Moving from slide ' + from + ' to ' + to); + }); + */ + change: 'deck.change', + + /* + This event fires at the beginning of deck initialization. This event makes + a good hook for preprocessing extensions looking to modify the DOM before + the deck is fully initialized. It is also possible to halt the deck.init + event from firing while you do things in beforeInit. This can be done by + calling lockInit on the event object passed to this event. The init can be + released by calling releaseInit. + + $(document).bind('deck.beforeInit', function(event) { + event.lockInit(); // halts deck.init event + window.setTimeout(function() { + event.releaseInit(); // deck.init will now fire 2 seconds later + }, 2000); + }); + + The init event will be fired regardless of locks after + options.initLockTimeout milliseconds. 
+ */ + beforeInitialize: 'deck.beforeInit', + + /* + This event fires at the end of deck initialization. Extensions should + implement any code that relies on user extensible options (key bindings, + element selectors, classes) within a handler for this event. Native + events associated with Deck JS should be scoped under a .deck event + namespace, as with the example below: + + var $d = $(document); + $.deck.defaults.keys.myExtensionKeycode = 70; // 'h' + $d.bind('deck.init', function() { + $d.bind('keydown.deck', function(event) { + if (event.which === $.deck.getOptions().keys.myExtensionKeycode) { + // Rock out + } + }); + }); + */ + initialize: 'deck.init' + }; + + var options = {}; + var $document = $(document); + var $window = $(window); + var stopPropagation = function(event) { + event.stopPropagation(); + }; + + var updateContainerState = function() { + var oldIndex = $container.data('onSlide'); + $container.removeClass(options.classes.onPrefix + oldIndex); + $container.addClass(options.classes.onPrefix + currentIndex); + $container.data('onSlide', currentIndex); + }; + + var updateChildCurrent = function() { + var $oldCurrent = $('.' 
+ options.classes.current); + var $oldParents = $oldCurrent.parentsUntil(options.selectors.container); + var $newCurrent = slides[currentIndex]; + var $newParents = $newCurrent.parentsUntil(options.selectors.container); + $oldParents.removeClass(options.classes.childCurrent); + $newParents.addClass(options.classes.childCurrent); + }; + + var removeOldSlideStates = function() { + var $all = $(); + $.each(slides, function(i, el) { + $all = $all.add(el); + }); + $all.removeClass([ + options.classes.before, + options.classes.previous, + options.classes.current, + options.classes.next, + options.classes.after + ].join(' ')); + }; + + var addNewSlideStates = function() { + slides[currentIndex].addClass(options.classes.current); + if (currentIndex > 0) { + slides[currentIndex-1].addClass(options.classes.previous); + } + if (currentIndex + 1 < slides.length) { + slides[currentIndex+1].addClass(options.classes.next); + } + if (currentIndex > 1) { + $.each(slides.slice(0, currentIndex - 1), function(i, $slide) { + $slide.addClass(options.classes.before); + }); + } + if (currentIndex + 2 < slides.length) { + $.each(slides.slice(currentIndex+2), function(i, $slide) { + $slide.addClass(options.classes.after); + }); + } + }; + + var setAriaHiddens = function() { + $(options.selectors.slides).each(function() { + var $slide = $(this); + var isSub = $slide.closest('.' 
+ options.classes.childCurrent).length; + var isBefore = $slide.hasClass(options.classes.before) && !isSub; + var isPrevious = $slide.hasClass(options.classes.previous) && !isSub; + var isNext = $slide.hasClass(options.classes.next); + var isAfter = $slide.hasClass(options.classes.after); + var ariaHiddenValue = isBefore || isPrevious || isNext || isAfter; + $slide.attr('aria-hidden', ariaHiddenValue); + }); + }; + + var updateStates = function() { + updateContainerState(); + updateChildCurrent(); + removeOldSlideStates(); + addNewSlideStates(); + if (options.setAriaHiddens) { + setAriaHiddens(); + } + }; + + var initSlidesArray = function(elements) { + if ($.isArray(elements)) { + $.each(elements, function(i, element) { + slides.push($(element)); + }); + } + else { + $(elements).each(function(i, element) { + slides.push($(element)); + }); + } + }; + + var bindKeyEvents = function() { + var editables = [ + 'input', + 'textarea', + 'select', + 'button', + 'meter', + 'progress', + '[contentEditable]' + ].join(', '); + + $document.unbind('keydown.deck').bind('keydown.deck', function(event) { + var isNext = event.which === options.keys.next; + var isPrev = event.which === options.keys.previous; + isNext = isNext || $.inArray(event.which, options.keys.next) > -1; + isPrev = isPrev || $.inArray(event.which, options.keys.previous) > -1; + + if (isNext) { + methods.next(); + event.preventDefault(); + } + else if (isPrev) { + methods.prev(); + event.preventDefault(); + } + }); + + $document.undelegate(editables, 'keydown.deck', stopPropagation); + $document.delegate(editables, 'keydown.deck', stopPropagation); + }; + + var bindTouchEvents = function() { + var startTouch; + var direction = options.touch.swipeDirection; + var tolerance = options.touch.swipeTolerance; + var listenToHorizontal = ({ both: true, horizontal: true })[direction]; + var listenToVertical = ({ both: true, vertical: true })[direction]; + + $container.unbind('touchstart.deck'); + 
$container.bind('touchstart.deck', function(event) { + if (!startTouch) { + startTouch = $.extend({}, event.originalEvent.targetTouches[0]); + } + }); + + $container.unbind('touchmove.deck'); + $container.bind('touchmove.deck', function(event) { + $.each(event.originalEvent.changedTouches, function(i, touch) { + if (!startTouch || touch.identifier !== startTouch.identifier) { + return true; + } + var xDistance = touch.screenX - startTouch.screenX; + var yDistance = touch.screenY - startTouch.screenY; + var leftToRight = xDistance > tolerance && listenToHorizontal; + var rightToLeft = xDistance < -tolerance && listenToHorizontal; + var topToBottom = yDistance > tolerance && listenToVertical; + var bottomToTop = yDistance < -tolerance && listenToVertical; + + if (leftToRight || topToBottom) { + $.deck('prev'); + startTouch = undefined; + } + else if (rightToLeft || bottomToTop) { + $.deck('next'); + startTouch = undefined; + } + return false; + }); + + if (listenToVertical) { + event.preventDefault(); + } + }); + + $container.unbind('touchend.deck'); + $container.bind('touchend.deck', function(event) { + $.each(event.originalEvent.changedTouches, function(i, touch) { + if (startTouch && touch.identifier === startTouch.identifier) { + startTouch = undefined; + } + }); + }); + }; + + var indexInBounds = function(index) { + return typeof index === 'number' && index >=0 && index < slides.length; + }; + + var createBeforeInitEvent = function() { + var event = $.Event(events.beforeInitialize); + event.locks = 0; + event.done = $.noop; + event.lockInit = function() { + ++event.locks; + }; + event.releaseInit = function() { + --event.locks; + if (!event.locks) { + event.done(); + } + }; + return event; + }; + + var goByHash = function(str) { + var id = str.substr(str.indexOf("#") + 1); + + $.each(slides, function(i, $slide) { + if ($slide.attr('id') === id) { + $.deck('go', i); + return false; + } + }); + + // If we don't set these to 0 the container scrolls due to 
hashchange + if (options.preventFragmentScroll) { + $.deck('getContainer').scrollLeft(0).scrollTop(0); + } + }; + + var assignSlideId = function(i, $slide) { + var currentId = $slide.attr('id'); + var previouslyAssigned = $slide.data('deckAssignedId') === currentId; + if (!currentId || previouslyAssigned) { + $slide.attr('id', options.hashPrefix + i); + $slide.data('deckAssignedId', options.hashPrefix + i); + } + }; + + var removeContainerHashClass = function(id) { + $container.removeClass(options.classes.onPrefix + id); + }; + + var addContainerHashClass = function(id) { + $container.addClass(options.classes.onPrefix + id); + }; + + var setupHashBehaviors = function() { + $fragmentLinks = $(); + $.each(slides, function(i, $slide) { + var hash; + + assignSlideId(i, $slide); + hash = '#' + $slide.attr('id'); + if (hash === window.location.hash) { + setTimeout(function() { + $.deck('go', i); + }, 1); + } + $fragmentLinks = $fragmentLinks.add('a[href="' + hash + '"]'); + }); + + if (slides.length) { + addContainerHashClass($.deck('getSlide').attr('id')); + }; + }; + + var changeHash = function(from, to) { + var hash = '#' + $.deck('getSlide', to).attr('id'); + var hashPath = window.location.href.replace(/#.*/, '') + hash; + + removeContainerHashClass($.deck('getSlide', from).attr('id')); + addContainerHashClass($.deck('getSlide', to).attr('id')); + if (Modernizr.history) { + window.history.replaceState({}, "", hashPath); + } + }; + + /* Methods exposed in the jQuery.deck namespace */ + var methods = { + + /* + jQuery.deck(selector, options) + + selector: string | jQuery | array + options: object, optional + + Initializes the deck, using each element matched by selector as a slide. + May also be passed an array of string selectors or jQuery objects, in + which case each selector in the array is considered a slide. The second + parameter is an optional options object which will extend the default + values. + + Users may also pass only an options object to init. 
In this case the slide + selector will be options.selectors.slides which defaults to .slide. + + $.deck('.slide'); + + or + + $.deck([ + '#first-slide', + '#second-slide', + '#etc' + ]); + */ + init: function(opts) { + var beforeInitEvent = createBeforeInitEvent(); + var overrides = opts; + + if (!$.isPlainObject(opts)) { + overrides = arguments[1] || {}; + $.extend(true, overrides, { + selectors: { + slides: arguments[0] + } + }); + } + + options = $.extend(true, {}, $.deck.defaults, overrides); + slides = []; + currentIndex = 0; + $container = $(options.selectors.container); + + // Hide the deck while states are being applied to kill transitions + $container.addClass(options.classes.loading); + + // populate the array of slides for pre-init + initSlidesArray(options.selectors.slides); + // Pre init event for preprocessing hooks + beforeInitEvent.done = function() { + // re-populate the array of slides + slides = []; + initSlidesArray(options.selectors.slides); + setupHashBehaviors(); + bindKeyEvents(); + bindTouchEvents(); + $container.scrollLeft(0).scrollTop(0); + + if (slides.length) { + updateStates(); + } + + // Show deck again now that slides are in place + $container.removeClass(options.classes.loading); + $document.trigger(events.initialize); + }; + + $document.trigger(beforeInitEvent); + if (!beforeInitEvent.locks) { + beforeInitEvent.done(); + } + window.setTimeout(function() { + if (beforeInitEvent.locks) { + if (window.console) { + window.console.warn('Something locked deck initialization\ + without releasing it before the timeout. Proceeding with\ + initialization anyway.'); + } + beforeInitEvent.done(); + } + }, options.initLockTimeout); + }, + + /* + jQuery.deck('go', index) + + index: integer | string + + Moves to the slide at the specified index if index is a number. Index is + 0-based, so $.deck('go', 0); will move to the first slide. If index is a + string this will move to the slide with the specified id. 
If index is out + of bounds or doesn't match a slide id the call is ignored. + */ + go: function(indexOrId) { + var beforeChangeEvent = $.Event(events.beforeChange); + var index; + + /* Number index, easy. */ + if (indexInBounds(indexOrId)) { + index = indexOrId; + } + /* Id string index, search for it and set integer index */ + else if (typeof indexOrId === 'string') { + $.each(slides, function(i, $slide) { + if ($slide.attr('id') === indexOrId) { + index = i; + return false; + } + }); + } + if (typeof index === 'undefined') { + return; + } + + /* Trigger beforeChange. If nothing prevents the change, trigger + the slide change. */ + $document.trigger(beforeChangeEvent, [currentIndex, index]); + if (!beforeChangeEvent.isDefaultPrevented()) { + $document.trigger(events.change, [currentIndex, index]); + changeHash(currentIndex, index); + currentIndex = index; + updateStates(); + } + }, + + /* + jQuery.deck('next') + + Moves to the next slide. If the last slide is already active, the call + is ignored. + */ + next: function() { + methods.go(currentIndex+1); + }, + + /* + jQuery.deck('prev') + + Moves to the previous slide. If the first slide is already active, the + call is ignored. + */ + prev: function() { + methods.go(currentIndex-1); + }, + + /* + jQuery.deck('getSlide', index) + + index: integer, optional + + Returns a jQuery object containing the slide at index. If index is not + specified, the current slide is returned. + */ + getSlide: function(index) { + index = typeof index !== 'undefined' ? index : currentIndex; + if (!indexInBounds(index)) { + return null; + } + return slides[index]; + }, + + /* + jQuery.deck('getSlides') + + Returns all slides as an array of jQuery objects. + */ + getSlides: function() { + return slides; + }, + + /* + jQuery.deck('getTopLevelSlides') + + Returns all slides that are not subslides. 
+ */ + getTopLevelSlides: function() { + var topLevelSlides = []; + var slideSelector = options.selectors.slides; + var subSelector = [slideSelector, slideSelector].join(' '); + $.each(slides, function(i, $slide) { + if (!$slide.is(subSelector)) { + topLevelSlides.push($slide); + } + }); + return topLevelSlides; + }, + + /* + jQuery.deck('getNestedSlides', index) + + index: integer, optional + + Returns all the nested slides of the current slide. If index is + specified it returns the nested slides of the slide at that index. + If there are no nested slides this will return an empty array. + */ + getNestedSlides: function(index) { + var targetIndex = index == null ? currentIndex : index; + var $targetSlide = $.deck('getSlide', targetIndex); + var $nesteds = $targetSlide.find(options.selectors.slides); + var nesteds = $nesteds.get(); + return $.map(nesteds, function(slide, i) { + return $(slide); + }); + }, + + + /* + jQuery.deck('getContainer') + + Returns a jQuery object containing the deck container as defined by the + container option. + */ + getContainer: function() { + return $container; + }, + + /* + jQuery.deck('getOptions') + + Returns the options object for the deck, including any overrides that + were defined at initialization. + */ + getOptions: function() { + return options; + }, + + /* + jQuery.deck('extend', name, method) + + name: string + method: function + + Adds method to the deck namespace with the key of name. This doesn’t + give access to any private member data — public methods must still be + used within method — but lets extension authors piggyback on the deck + namespace rather than pollute jQuery. 
+ + $.deck('extend', 'alert', function(msg) { + alert(msg); + }); + + // Alerts 'boom' + $.deck('alert', 'boom'); + */ + extend: function(name, method) { + methods[name] = method; + } + }; + + /* jQuery extension */ + $.deck = function(method, arg) { + var args = Array.prototype.slice.call(arguments, 1); + if (methods[method]) { + return methods[method].apply(this, args); + } + else { + return methods.init(method, arg); + } + }; + + /* + The default settings object for a deck. All deck extensions should extend + this object to add defaults for any of their options. + + options.classes.after + This class is added to all slides that appear after the 'next' slide. + + options.classes.before + This class is added to all slides that appear before the 'previous' + slide. + + options.classes.childCurrent + This class is added to all elements in the DOM tree between the + 'current' slide and the deck container. For standard slides, this is + mostly seen and used for nested slides. + + options.classes.current + This class is added to the current slide. + + options.classes.loading + This class is applied to the deck container during loading phases and is + primarily used as a way to short circuit transitions between states + where such transitions are distracting or unwanted. For example, this + class is applied during deck initialization and then removed to prevent + all the slides from appearing stacked and transitioning into place + on load. + + options.classes.next + This class is added to the slide immediately following the 'current' + slide. + + options.classes.onPrefix + This prefix, concatenated with the current slide index, is added to the + deck container as you change slides. + + options.classes.previous + This class is added to the slide immediately preceding the 'current' + slide. + + options.selectors.container + Elements matched by this CSS selector will be considered the deck + container. 
The deck container is used to scope certain states of the + deck, as with the onPrefix option, or with extensions such as deck.goto + and deck.menu. + + options.selectors.slides + Elements matched by this selector make up the individual deck slides. + If a user chooses to pass the slide selector as the first argument to + $.deck() on initialization it does the same thing as passing in this + option and this option value will be set to the value of that parameter. + + options.keys.next + The numeric keycode used to go to the next slide. + + options.keys.previous + The numeric keycode used to go to the previous slide. + + options.touch.swipeDirection + The direction swipes occur to cause slide changes. Can be 'horizontal', + 'vertical', or 'both'. Any other value or a falsy value will disable + swipe gestures for navigation. + + options.touch.swipeTolerance + The number of pixels the users finger must travel to produce a swipe + gesture. + + options.initLockTimeout + The number of milliseconds the init event will wait for BeforeInit event + locks to be released before firing the init event regardless. + + options.hashPrefix + Every slide that does not have an id is assigned one at initialization. + Assigned ids take the form of hashPrefix + slideIndex, e.g., slide-0, + slide-12, etc. + + options.preventFragmentScroll + When deep linking to a hash of a nested slide, this scrolls the deck + container to the top, undoing the natural browser behavior of scrolling + to the document fragment on load. + + options.setAriaHiddens + When set to true, deck.js will set aria hidden attributes for slides + that do not appear onscreen according to a typical heirarchical + deck structure. You may want to turn this off if you are using a theme + where slides besides the current slide are visible on screen and should + be accessible to screenreaders. 
+ */ + $.deck.defaults = { + classes: { + after: 'deck-after', + before: 'deck-before', + childCurrent: 'deck-child-current', + current: 'deck-current', + loading: 'deck-loading', + next: 'deck-next', + onPrefix: 'on-slide-', + previous: 'deck-previous' + }, + + selectors: { + container: '.deck-container', + slides: '.slide' + }, + + keys: { + // enter, space, page down, right arrow, down arrow, + next: [13, 32, 34, 39, 40], + // backspace, page up, left arrow, up arrow + previous: [8, 33, 37, 38] + }, + + touch: { + swipeDirection: 'horizontal', + swipeTolerance: 60 + }, + + initLockTimeout: 10000, + hashPrefix: 'slide-', + preventFragmentScroll: true, + setAriaHiddens: true + }; + + $document.ready(function() { + $('html').addClass('ready'); + }); + + $window.bind('hashchange.deck', function(event) { + if (event.originalEvent && event.originalEvent.newURL) { + goByHash(event.originalEvent.newURL); + } + else { + goByHash(window.location.hash); + } + }); + + $window.bind('load.deck', function() { + if (options.preventFragmentScroll) { + $container.scrollLeft(0).scrollTop(0); + } + }); +})(jQuery); diff --git a/backends/deckjs/deck.js/core/deck.core.scss b/backends/deckjs/deck.js/core/deck.core.scss new file mode 100755 index 00000000..baab0fd9 --- /dev/null +++ b/backends/deckjs/deck.js/core/deck.core.scss @@ -0,0 +1,65 @@ +html, body { + height:100%; + padding:0; + margin:0; +} + +.deck-container { + position:relative; + min-height:100%; + margin:0 auto; + overflow:hidden; + overflow-y:auto; + + .js & { + visibility:hidden; + } + + .ready & { + visibility:visible; + } + + .touch & { + -webkit-text-size-adjust:none; + -moz-text-size-adjust:none; + } +} + +.deck-loading { + display:none; +} + +.slide { + width:auto; + min-height:100%; + position:relative; +} + +.deck-before, .deck-previous, .deck-next, .deck-after { + position:absolute; + left:-999em; + top:-999em; +} + +.deck-current { + z-index:2; +} + +.slide .slide { + visibility:hidden; + position:static; + 
min-height:0; +} + +.deck-child-current { + position:static; + z-index:2; + + .slide { + visibility:hidden; + } + + .deck-previous, .deck-before, .deck-current { + visibility:visible; + } +} \ No newline at end of file diff --git a/backends/deckjs/deck.js/core/print.css b/backends/deckjs/deck.js/core/print.css new file mode 100644 index 00000000..0230f4c1 --- /dev/null +++ b/backends/deckjs/deck.js/core/print.css @@ -0,0 +1,25 @@ +body { + font-size: 18pt; +} + +h1 { + font-size: 48pt; +} + +h2 { + font-size: 36pt; +} + +h3 { + font-size: 28pt; +} + +pre { + border: 1px solid #000; + padding: 10px; + white-space: pre-wrap; +} + +.deck-container > .slide { + page-break-after: always; +} diff --git a/backends/deckjs/deck.js/core/print.scss b/backends/deckjs/deck.js/core/print.scss new file mode 100644 index 00000000..02acd4bf --- /dev/null +++ b/backends/deckjs/deck.js/core/print.scss @@ -0,0 +1,14 @@ +body { font-size:18pt; } +h1 { font-size:48pt; } +h2 { font-size:36pt; } +h3 { font-size:28pt; } + +pre { + border:1px solid #000; + padding:10px; + white-space:pre-wrap; +} + +.deck-container > .slide { + page-break-after: always; +} diff --git a/backends/deckjs/deck.js/extensions/deck.js-blank/README.md b/backends/deckjs/deck.js/extensions/deck.js-blank/README.md new file mode 100644 index 00000000..0d5c882a --- /dev/null +++ b/backends/deckjs/deck.js/extensions/deck.js-blank/README.md @@ -0,0 +1,10 @@ +#deck.blank.js + +Deck.blank.js is an extension for the deck.js framework to allow blanking of the current slide, to draw the attention back to the presenter + +At the moment, the key 'b' is hardcoded to blank/unblank. 
+ +##Todo + +- make the blanking key configurable +- make the background-color during blanking configurable \ No newline at end of file diff --git a/backends/deckjs/deck.js/extensions/deck.js-blank/deck.blank.js b/backends/deckjs/deck.js/extensions/deck.js-blank/deck.blank.js new file mode 100644 index 00000000..01955834 --- /dev/null +++ b/backends/deckjs/deck.js/extensions/deck.js-blank/deck.blank.js @@ -0,0 +1,33 @@ +/*! +Deck JS - deck.blank - v1.0 +Copyright (c) 2012 Mike Kellenberger +*/ + +/* +This module adds the necessary methods and key bindings to blank/unblank the screen by pressing 'b'. +*/ +(function($, deck, undefined) { + var $d = $(document); + + $[deck]('extend', 'activateBlankScreen', function() { + $[deck]('getSlide').hide(); + }); + + $[deck]('extend', 'deactivateBlankScreen', function() { + $[deck]('getSlide').show(); + }); + + $[deck]('extend', 'blankScreen', function() { + $[deck]('getSlide').is(":visible") ? $[deck]('activateBlankScreen') : $[deck]('deactivateBlankScreen'); + }); + + $d.bind('deck.init', function() { + // Bind key events + $d.unbind('keydown.blank').bind('keydown.blank', function(e) { + if (e.which==66) { + $[deck]('blankScreen'); + e.preventDefault(); + } + }); + }); +})(jQuery, 'deck'); \ No newline at end of file diff --git a/backends/deckjs/deck.js/extensions/goto/deck.goto.css b/backends/deckjs/deck.js/extensions/goto/deck.goto.css new file mode 100644 index 00000000..935574a1 --- /dev/null +++ b/backends/deckjs/deck.js/extensions/goto/deck.goto.css @@ -0,0 +1,36 @@ +.goto-form { + position: absolute; + z-index: 3; + bottom: 10px; + left: 50%; + height: 1.75em; + margin: 0 0 0 -9.125em; + line-height: 1.75em; + padding: 0.625em; + display: none; + background: #ccc; + overflow: hidden; + border-radius: 10px; +} +.goto-form label { + font-weight: bold; +} +.goto-form label, .goto-form input { + display: inline-block; + font-family: inherit; +} +.deck-goto .goto-form { + display: block; +} + +#goto-slide { + width: 
8.375em; + margin: 0 0.625em; + height: 1.4375em; +} + +@media print { + .goto-form, #goto-slide { + display: none; + } +} diff --git a/backends/deckjs/deck.js/extensions/goto/deck.goto.html b/backends/deckjs/deck.js/extensions/goto/deck.goto.html new file mode 100644 index 00000000..4b739982 --- /dev/null +++ b/backends/deckjs/deck.js/extensions/goto/deck.goto.html @@ -0,0 +1,7 @@ + +
+ + + + +
\ No newline at end of file diff --git a/backends/deckjs/deck.js/extensions/goto/deck.goto.js b/backends/deckjs/deck.js/extensions/goto/deck.goto.js new file mode 100644 index 00000000..6a90f16c --- /dev/null +++ b/backends/deckjs/deck.js/extensions/goto/deck.goto.js @@ -0,0 +1,190 @@ +/*! +Deck JS - deck.goto +Copyright (c) 2011-2014 Caleb Troughton +Dual licensed under the MIT license. +https://github.com/imakewebthings/deck.js/blob/master/MIT-license.txt +*/ + +/* +This module adds the necessary methods and key bindings to show and hide a form +for jumping to any slide number/id in the deck (and processes that form +accordingly). The form-showing state is indicated by the presence of a class on +the deck container. +*/ +(function($, undefined) { + var $document = $(document); + var rootCounter; + + var bindKeyEvents = function() { + $document.unbind('keydown.deckgoto'); + $document.bind('keydown.deckgoto', function(event) { + var key = $.deck('getOptions').keys.goto; + if (event.which === key || $.inArray(event.which, key) > -1) { + event.preventDefault(); + $.deck('toggleGoTo'); + } + }); + }; + + var populateDatalist = function() { + var options = $.deck('getOptions'); + var $datalist = $(options.selectors.gotoDatalist); + + $.each($.deck('getSlides'), function(i, $slide) { + var id = $slide.attr('id'); + if (id) { + $datalist.append('