path: root/docs/report
Age | Commit message | Author | Files | Lines
2021-11-22 | Report: Add 2n-icx data | Tibor Frank | 6 | -15/+6
2021-11-22 | Report: Hide 2n-icx empty graphs | Tibor Frank | 6 | -6/+16
2021-11-19 | PAL: Add boxpoints to all box graphs | Tibor Frank | 7 | -17/+12
2021-11-18 | Report: Improve docs related to TRex | Vratko Polak | 4 | -9/+15
2021-11-15 | Report: Add data | Tibor Frank | 1 | -3/+4
2021-11-12 | Report: Add data | Tibor Frank | 5 | -32/+32
2021-11-11 | Report: Add DPDK speedup graphs | Tibor Frank | 15 | -5/+849
2021-11-11 | Report: Add 4t2c graphs for aws | Tibor Frank | 5 | -0/+356
2021-11-10 | Report: Add stats tables, aws tests | Tibor Frank | 1 | -0/+39
2021-11-10 | Report: Update known issues | Vratko Polak | 1 | -26/+47
2021-11-10 | Report: Fixes in dynamic content | Tibor Frank | 23 | -846/+748
2021-11-10 | report: update to vpp performance release notes | Maciek Konstantynowicz | 1 | -44/+17
2021-11-09 | Report: Add 2n-icx, part 2 | Tibor Frank | 63 | -248/+3274
2021-11-09 | report: edits in aws methodology section | Maciek Konstantynowicz | 4 | -119/+150
2021-11-09 | Report: Hide aws latency | Tibor Frank | 3 | -4/+4
2021-11-08 | Report: Add aws latency | Tibor Frank | 6 | -7/+71
2021-11-08 | Report: Add 2n-icx, part 1 | Tibor Frank | 1 | -3/+1
2021-11-08 | Report: Remove tests removed by 34207 | Tibor Frank | 63 | -3861/+238
2021-11-08 | Report: Add data | Tibor Frank | 1 | -1/+1
2021-11-05 | Report: Small edits in static content. | Tibor Frank | 2 | -3/+9
2021-11-03 | fix(Docs): Report section updates | pmikus | 7 | -32/+23
2021-11-03 | Report: Add rls data | Tibor Frank | 4 | -31/+29
2021-10-27 | Report: Add data | Tibor Frank | 6 | -7/+3
2021-10-27 | feat(Docs): Add AWS methodology | pmikus | 5 | -0/+290
2021-10-27 | docs: TRex static documentation | Viliam Luc | 9 | -36/+169
2021-10-26 | feat(Docs): DPDK release notes | pmikus | 1 | -5/+1
2021-10-26 | Report: Modify aws graphs | Tibor Frank | 12 | -178/+316
2021-10-25 | Report: Add data | Tibor Frank | 2 | -3/+3
2021-10-21 | Trending: Add af_xdp | Tibor Frank | 6 | -186/+180
2021-10-19 | Report: Add data, fix aws graphs | Tibor Frank | 2 | -12/+12
2021-10-18 | Report: Fix aws graphs | Tibor Frank | 4 | -40/+40
2021-10-18 | Report: Move CSIT Documentation to Introduction | Tibor Frank | 10 | -18/+3
2021-10-18 | Report: Add RC2 data | Tibor Frank | 1 | -1/+1
2021-10-14 | Report: Hide vsap, add data | Tibor Frank | 1 | -0/+2
2021-10-14 | Report: Change ref to footnotes in Integration Tests | Tibor Frank | 1 | -60/+27
2021-10-13 | feat(Docs): Update few sections | pmikus | 6 | -50/+31
2021-10-13 | Report: Fix formatting in 'Integration Tests' | Tibor Frank | 1 | -65/+106
2021-10-12 | Report: Hide icx dynamic content | Tibor Frank | 12 | -1/+25
2021-10-12 | Report: Fix bugs and typos in static content | Tibor Frank | 6 | -0/+14
2021-10-12 | Report: Add data | Tibor Frank | 1 | -12/+0
2021-10-11 | Report: Remove 3n TRex tests | Tibor Frank | 6 | -418/+5
2021-10-11 | Report: Add rc1 and re-test data | Tibor Frank | 4 | -3/+5
2021-10-08 | Report: Add TRex tests | Tibor Frank | 17 | -0/+986
2021-10-08 | Docs: update links by S3 | Tibor Frank | 4 | -14/+14
2021-09-29 | Report: configure rls2110 | Tibor Frank | 49 | -85/+49
2021-09-28 | Report: Add aws | Tibor Frank | 23 | -0/+677
2021-08-25 | lab: update Arm lab docs | Juraj Linkeš | 1 | -1/+1
2021-08-18 | Report: Add tables with builds durations | Tibor Frank | 2 | -0/+56
2021-08-16 | Report: configure rls2106.33 | Tibor Frank | 3 | -0/+8
2021-08-11 | RCA: Add CSIT-1791 | Vratko Polak | 1 | -0/+3
                     L2INPUT_BVI))
    {
      if (ethertype == ETHERNET_TYPE_ARP)
        {
          ethernet_arp_header_t *arp0 = (ethernet_arp_header_t *) l3h0;
          if (arp0->opcode ==
              clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_request))
            vnet_buffer (b0)->l2.shg = 0;
        }
      else /* must be ICMPv6 */
        {
          ip6_header_t *iph0 = (ip6_header_t *) l3h0;
          icmp6_neighbor_solicitation_or_advertisement_header_t *ndh0;
          ndh0 = ip6_next_header (iph0);
          if (ndh0->icmp.type == ICMP6_neighbor_solicitation)
            vnet_buffer (b0)->l2.shg = 0;
        }
      }
    }
  else
    {
      /*
       * For packet from BVI - set SHG of unicast packet from BVI to 0 so it
       * is not dropped on output to VXLAN tunnels or other ports with the
       * same SHG as that of the BVI.
       */
      if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
                         L2INPUT_BVI))
        vnet_buffer (b0)->l2.shg = 0;
    }

  if (l2_input_is_bridge (config))
    {
      /* Do bridge-domain processing */
      /* save BD ID for next feature graph nodes */
      vnet_buffer (b0)->l2.bd_index = config->bd_index;

      /* Save bridge domain and interface seq_num */
      vnet_buffer (b0)->l2.l2fib_sn =
        l2_fib_mk_seq_num (config->bd_seq_num, config->seq_num);
      vnet_buffer (b0)->l2.bd_age = config->bd_mac_age;

      /*
       * Process bridge domain feature enables.
       * To perform learning/flooding/forwarding, the corresponding bit
       * must be enabled in both the input interface config and in the
       * bridge domain config. In the bd_bitmap, bits for features other
       * than learning/flooding/forwarding should always be set.
       */
      feat_mask = feat_mask & config->bd_feature_bitmap;
    }
  else if (l2_input_is_xconnect (config))
    {
      /* Set the output interface */
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = config->output_sw_if_index;
    }
  else
    feat_mask = L2INPUT_FEAT_DROP;

  /* mask out features from bitmap using packet type and bd config */
  u32 feature_bitmap = config->feature_bitmap & feat_mask;

  /* save for next feature graph nodes */
  vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;

  /* Determine the next node */
  *next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
                                            feature_bitmap);
}

static_always_inline uword
l2input_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame, int do_trace)
{
  u32 n_left, *from;
  l2input_main_t *msm = &l2input_main;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;	/* number of packets to process */
  vlib_get_buffers (vm, from, bufs, n_left);

  while (n_left > 0)
    {
      while (n_left >= 8)
        {
          u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;

          /* Prefetch next iteration. */
          {
            /* Prefetch the buffer header and packet for the N+2 loop iteration */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          classify_and_dispatch (msm, b[0], &next[0]);
          classify_and_dispatch (msm, b[1], &next[1]);
          classify_and_dispatch (msm, b[2], &next[2]);
          classify_and_dispatch (msm, b[3], &next[3]);

          if (do_trace)
            {
              /* RX interface handles */
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
              sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
              sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

              if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
                  l2input_trace_t *t =
                    vlib_add_trace (vm, node, b[0], sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  t->feat_mask = vnet_buffer (b[0])->l2.feature_bitmap;
                  clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                                    sizeof (h0->dst_address) +
                                    sizeof (h0->src_address));
                }
              if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ethernet_header_t *h1 = vlib_buffer_get_current (b[1]);
                  l2input_trace_t *t =
                    vlib_add_trace (vm, node, b[1], sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  t->feat_mask = vnet_buffer (b[1])->l2.feature_bitmap;
                  clib_memcpy_fast (t->dst_and_src, h1->dst_address,
                                    sizeof (h1->dst_address) +
                                    sizeof (h1->src_address));
                }
              if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ethernet_header_t *h2 = vlib_buffer_get_current (b[2]);
                  l2input_trace_t *t =
                    vlib_add_trace (vm, node, b[2], sizeof (*t));
                  t->sw_if_index = sw_if_index2;
                  t->feat_mask = vnet_buffer (b[2])->l2.feature_bitmap;
                  clib_memcpy_fast (t->dst_and_src, h2->dst_address,
                                    sizeof (h2->dst_address) +
                                    sizeof (h2->src_address));
                }
              if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ethernet_header_t *h3 = vlib_buffer_get_current (b[3]);
                  l2input_trace_t *t =
                    vlib_add_trace (vm, node, b[3], sizeof (*t));
                  t->sw_if_index = sw_if_index3;
                  t->feat_mask = vnet_buffer (b[3])->l2.feature_bitmap;
                  clib_memcpy_fast (t->dst_and_src, h3->dst_address,
                                    sizeof (h3->dst_address) +
                                    sizeof (h3->src_address));
                }
            }

          b += 4;
          n_left -= 4;
          next += 4;
        }

      while (n_left > 0)
        {
          classify_and_dispatch (msm, b[0], &next[0]);

          if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
              l2input_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
              t->feat_mask = vnet_buffer (b[0])->l2.feature_bitmap;
              clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                                sizeof (h0->dst_address) +
                                sizeof (h0->src_address));
            }

          b += 1;
          next += 1;
          n_left -= 1;
        }
    }

  vlib_node_increment_counter (vm, l2input_node.index,
                               L2INPUT_ERROR_L2INPUT, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

VLIB_NODE_FN (l2input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    return l2input_node_inline (vm, node, frame, 1 /* do_trace */ );
  return l2input_node_inline (vm, node, frame, 0 /* do_trace */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2input_node) = {
  .name = "l2-input",
  .vector_size = sizeof (u32),
  .format_trace = format_l2input_trace,
  .format_buffer = format_ethernet_header_with_length,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2input_error_strings),
  .error_strings = l2input_error_strings,

  .n_next_nodes = L2INPUT_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [L2INPUT_NEXT_LEARN] = "l2-learn",
    [L2INPUT_NEXT_FWD] = "l2-fwd",
    [L2INPUT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */