path: root/src/vnet/lisp-cp
Age | Commit message | Author | Files | Lines (-/+)
2018-08-22 | Consolidate the [un]format_mac_address implementations | Neale Ranns | 2 | -18/+0
2018-08-15 | Remove client_index field from replies in API | Ondrej Fabry | 2 | -4/+0
2018-02-02 | lisp-cp: fix handling of ndp without source link addr VPP-1159 | Florin Coras | 1 | -4/+21
2018-01-23 | VPPAPIGEN: vppapigen replacement in Python PLY. | Ole Troan | 2 | -2/+2
2018-01-11 | api: remove transport specific code from handlers | Florin Coras | 2 | -191/+124
2018-01-09 | api: refactor vlibmemory | Florin Coras | 2 | -46/+36
2018-01-05 | sock api: add infra for bootstrapping shm clients | Florin Coras | 1 | -14/+0
2017-12-10 | VPP-1077 Add meaningful error info, when executing command with enable/disabl... | Swarup Nayak | 2 | -11/+11
2017-11-02 | LISP: fix negative mapping timeout, VPP-1043 | Filip Tehlar | 1 | -1/+2
2017-10-31 | LISP: add P-ITR/P-ETR/xTR API handlers, ONE-24 | Filip Tehlar | 6 | -63/+552
2017-10-30 | LISP: improve updating the dataplane when locators change | Filip Tehlar | 5 | -28/+74
2017-10-20 | null-terminate some formatted string | Gabriel Ganne | 2 | -0/+13
2017-10-18 | LISP: fix crash when sending NSH map-request message, ONE-32 | Filip Tehlar | 1 | -0/+3
2017-10-16 | LISP: fix map-request counters, ONE-25 | Filip Tehlar | 1 | -6/+5
2017-10-09 | vppapigen: support per-file (major,minor,patch) version stamps | Dave Barach | 2 | -0/+4
2017-10-03 | Repair vlib API socket server | Dave Barach | 2 | -1/+10
2017-09-27 | LISP: add API handlers for set/get transport protocol | Filip Tehlar | 3 | -0/+62
2017-09-20 | LISP: add debug cli for neighbor discovery | Filip Tehlar | 4 | -6/+71
2017-09-19 | LISP: support for neighbor discovery | Filip Tehlar | 8 | -110/+384
2017-09-19 | Remove associated lisp-gpe entries when removing lisp local mapping. | Alberto Rodriguez-Natal | 2 | -7/+76
2017-09-07 | LISP: add neighbor discovery and CP protocol separation APIs | Filip Tehlar | 2 | -0/+86
2017-09-07 | LISP: Add APIs for enable/disable xTR/P-ITR/P-ETR modes | Filip Tehlar | 1 | -0/+60
2017-09-04 | LISP: re-fetch mapping before it expires | Filip Tehlar | 7 | -124/+272
2017-08-12 | LISP: fix fid nsh address formatting | Florin Coras | 1 | -1/+8
2017-08-11 | LISP: fix wrong reply message in map_register_fallback_threshold call | Filip Tehlar | 1 | -1/+1
2017-08-10 | Fix LISP cp buffer leakage | Florin Coras | 1 | -1/+3
2017-08-07 | LISP: Map-server fallback feature | Filip Tehlar | 5 | -58/+311
2017-08-05 | LISP: fix map register TTL reply handler, VPP-926 | Filip Tehlar | 1 | -1/+1
2017-08-02 | LISP: make TTL for map register messages configurable | Filip Tehlar | 5 | -0/+153
2017-08-02 | Make ip csum configurable in vlib buffer functions | Florin Coras | 4 | -15/+17
2017-07-26 | Fix lisp udp checksum | Florin Coras | 1 | -6/+18
2017-07-14 | Introduce l{2,3,4}_hdr_offset fields in the buffer metadata | Damjan Marion | 1 | -1/+1
2017-07-06 | VPP-902: LISP-CP: Wrong size in one_l2_arp_entries_get message. | Ole Troan | 1 | -1/+1
2017-06-28 | switch vlib process model to tw_timer_template timer impl | Dave Barach | 1 | -0/+1
2017-06-22 | Update lisp map record default ttl to 24h (tag: v17.10-rc0) | Florin Coras | 1 | -2/+2
2017-06-17 | Fix map-notify processing with multiple workers | Florin Coras | 2 | -13/+44
2017-06-12 | Fix coverity issue | Filip Tehlar | 1 | -0/+1
2017-06-08 | LISP: add NSH support | Filip Tehlar | 10 | -28/+572
2017-06-06 | Fix lisp map-notify parsing | Florin Coras | 1 | -0/+1
2017-05-30 | LISP: L2 ARP handling | Filip Tehlar | 9 | -36/+600
2017-05-04 | Fix coverity issue | Filip Tehlar | 1 | -0/+2
2017-05-04 | LISP: group mapping records in map-register message | Filip Tehlar | 1 | -6/+19
2017-05-03 | Fix vnet unit tests | Filip Tehlar | 4 | -22/+40
2017-04-27 | LISP: fix deleting of locators, VPP-713 | Filip Tehlar | 1 | -0/+4
2017-04-25 | "autoreply" flag: autogenerate standard xxx_reply_t messages | Dave Barach | 2 | -319/+30
2017-04-25 | LISP: clean DP when deleting locators in use | Filip Tehlar | 1 | -14/+76
2017-04-13 | LISP: make statistics thread safe | Filip Tehlar | 4 | -9/+41
2017-04-11 | LISP: show mapping negative action in CLI | Filip Tehlar | 1 | -2/+3
2017-04-05 | LISP: fix crash when GPE interface is re-added, VPP-685 | Filip Tehlar | 1 | -3/+3
2017-03-30 | LISP: Do not show P-ITR generated mapping | Filip Tehlar | 2 | -0/+6

/*
 * Fragment of the VPP SPAN (port mirroring) graph-node implementation,
 * which appears to come from src/vnet/span/node.c and is unrelated to the
 * lisp-cp log above.  The text picks up inside span_mirror (), in the loop
 * over the interface's configured mirror ports, just after the packet has
 * been cloned into c0.
 */
      if (PREDICT_TRUE (c0 != 0))
        {
          vnet_buffer (c0)->sw_if_index[VLIB_TX] = i;
          c0->flags |= VNET_BUFFER_F_SPAN_CLONE;

          if (sf == SPAN_FEAT_L2)
            vnet_buffer (c0)->l2.feature_bitmap = L2OUTPUT_FEAT_OUTPUT;

          to_mirror_next[0] = vlib_get_buffer_index (vm, c0);
          mirror_frames[i]->n_vectors++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              span_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->src_sw_if_index = sw_if_index0;
              t->mirror_sw_if_index = i;
#if 0
              /* Enable this path to allow packet trace of SPAN packets.
                 Note that all SPAN packets will show up on the trace output
                 with the first SPAN packet (since they are in the same frame)
                 thus making trace output of the original packet confusing */
              mirror_frames[i]->flags |= VLIB_FRAME_TRACE;
              c0->flags |= VLIB_BUFFER_IS_TRACED;
#endif
            }
        }
    }));
  /* *INDENT-ON* */
}

static_always_inline uword
span_node_inline_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame, vlib_rx_or_tx_t rxtx,
                     span_feat_t sf)
{
  span_main_t *sm = &span_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 n_left_from, *from, *to_next;
  u32 next_index;
  u32 sw_if_index;
  static __thread vlib_frame_t **mirror_frames = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  vec_validate_aligned (mirror_frames, sm->max_sw_if_index,
                        CLIB_CACHE_LINE_BYTES);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0;
          u32 bi1;
          vlib_buffer_t *b0;
          vlib_buffer_t *b1;
          u32 sw_if_index0;
          u32 next0 = 0;
          u32 sw_if_index1;
          u32 next1 = 0;

          /* speculatively enqueue b0, b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          to_next += 2;
          n_left_to_next -= 2;
          from += 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[rxtx];
          sw_if_index1 = vnet_buffer (b1)->sw_if_index[rxtx];

          span_mirror (vm, node, sw_if_index0, b0, mirror_frames, rxtx, sf);
          span_mirror (vm, node, sw_if_index1, b1, mirror_frames, rxtx, sf);

          switch (sf)
            {
            case SPAN_FEAT_L2:
              if (rxtx == VLIB_RX)
                {
                  next0 = vnet_l2_feature_next (b0, sm->l2_input_next,
                                                L2INPUT_FEAT_SPAN);
                  next1 = vnet_l2_feature_next (b1, sm->l2_input_next,
                                                L2INPUT_FEAT_SPAN);
                }
              else
                {
                  next0 = vnet_l2_feature_next (b0, sm->l2_output_next,
                                                L2OUTPUT_FEAT_SPAN);
                  next1 = vnet_l2_feature_next (b1, sm->l2_output_next,
                                                L2OUTPUT_FEAT_SPAN);
                }
              break;
            case SPAN_FEAT_DEVICE:
            default:
              vnet_feature_next (&next0, b0);
              vnet_feature_next (&next1, b1);
              break;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 sw_if_index0;
          u32 next0 = 0;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[rxtx];

          span_mirror (vm, node, sw_if_index0, b0, mirror_frames, rxtx, sf);

          switch (sf)
            {
            case SPAN_FEAT_L2:
              if (rxtx == VLIB_RX)
                next0 = vnet_l2_feature_next (b0, sm->l2_input_next,
                                              L2INPUT_FEAT_SPAN);
              else
                next0 = vnet_l2_feature_next (b0, sm->l2_output_next,
                                              L2OUTPUT_FEAT_SPAN);
              break;
            case SPAN_FEAT_DEVICE:
            default:
              vnet_feature_next (&next0, b0);
              break;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  for (sw_if_index = 0; sw_if_index < vec_len (mirror_frames); sw_if_index++)
    {
      vlib_frame_t *f = mirror_frames[sw_if_index];
      if (f == 0)
        continue;

      if (sf == SPAN_FEAT_L2)
        vlib_put_frame_to_node (vm, l2output_node.index, f);
      else
        vnet_put_frame_to_sw_interface (vnm, sw_if_index, f);
      mirror_frames[sw_if_index] = 0;
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (span_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return span_node_inline_fn (vm, node, frame, VLIB_RX, SPAN_FEAT_DEVICE);
}

VLIB_NODE_FN (span_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * frame)
{
  return span_node_inline_fn (vm, node, frame, VLIB_TX, SPAN_FEAT_DEVICE);
}

VLIB_NODE_FN (span_l2_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return span_node_inline_fn (vm, node, frame, VLIB_RX, SPAN_FEAT_L2);
}

VLIB_NODE_FN (span_l2_output_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * frame)
{
  return span_node_inline_fn (vm, node, frame, VLIB_TX, SPAN_FEAT_L2);
}

#define span_node_defs \
  .vector_size = sizeof (u32), \
  .format_trace = format_span_trace, \
  .type = VLIB_NODE_TYPE_INTERNAL, \
  .n_errors = ARRAY_LEN(span_error_strings), \
  .error_strings = span_error_strings, \
  .n_next_nodes = 0, \
  .next_nodes = { \
    [0] = "error-drop" \
  }

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (span_input_node) = {
  span_node_defs,
  .name = "span-input",
};

VLIB_REGISTER_NODE (span_output_node) = {
  span_node_defs,
  .name = "span-output",
};

VLIB_REGISTER_NODE (span_l2_input_node) = {
  span_node_defs,
  .name = "span-l2-input",
};

VLIB_REGISTER_NODE (span_l2_output_node) = {
  span_node_defs,
  .name = "span-l2-output",
};

#ifndef CLIB_MARCH_VARIANT
clib_error_t *
span_init (vlib_main_t * vm)
{
  span_main_t *sm = &span_main;

  sm->vlib_main = vm;
  sm->vnet_main = vnet_get_main ();

  /* Initialize the feature next-node indexes */
  feat_bitmap_init_next_nodes (vm,
                               span_l2_input_node.index,
                               L2INPUT_N_FEAT,
                               l2input_get_feat_names (),
                               sm->l2_input_next);

  feat_bitmap_init_next_nodes (vm,
                               span_l2_output_node.index,
                               L2OUTPUT_N_FEAT,
                               l2output_get_feat_names (),
                               sm->l2_output_next);

  return 0;
}

VLIB_INIT_FUNCTION (span_init);
/* *INDENT-ON* */
#endif /* CLIB_MARCH_VARIANT */

#undef span_node_defs

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */