about summary refs log tree commit diff stats
path: root/src/vnet/interface.api
Age | Commit message (Expand) | Author | Files | Lines
2021-02-15interface: Add promisc on/off in apiNathan Skrzypczak1-0/+14
2020-04-23ip: Replace Sematics for Interface IP addressesNeale Ranns1-0/+37
2019-11-22papi: support default for type alias decaying to basetypeOle Troan1-1/+1
2019-11-12interface: Allow VLAN tag-rewrite on non-sub-interfaces too.Jon Loeliger1-1/+1
2019-11-07interface: shmemioerror while getting name_filter argAleksander Djuric1-1/+1
2019-10-09interface: dump the interface device typeMohsin Kazmi1-0/+2
2019-10-09interface: callback to manage extra MAC addressesMatthew Smith1-1/+17
2019-09-19api: split vl_api_prefix into twoOle Troan1-1/+1
2019-09-03api: enforce vla is last and fixed string typeOle Troan1-5/+5
2019-08-20api: Cleanup APIs interface.apiJakub Grajciar1-120/+96
2019-05-23Fix vpp crash bug while deleting dhcp clientjackiechen19851-1/+1
2019-05-02vpp_papi_provider.py: update defautmapping.Paul Vinciguerra1-1/+1
2019-04-23API sw_interface_dump: Dump all if index is zeroVratko Polak1-3/+4
2019-04-17Make sw_interface_dump more compatible with 2.2.0Vratko Polak1-6/+5
2019-04-17api: Add to interface crud - read by sw_if_index.Paul Vinciguerra1-1/+3
2019-03-15Revert "API: Cleanup APIs interface.api"Ole Trøan1-97/+124
2019-03-15API: Cleanup APIs interface.apiJakub Grajciar1-124/+97
2019-03-14Update documentation for src/vnet/interface.api sw_interface_dumpPaul Vinciguerra1-1/+7
2018-12-17stats: Deprecate old stats frameworkOle Troan1-74/+0
2018-11-29API: Add support for type aliasesOle Troan1-0/+18
2018-11-08vnet: store hw interface speed in kbps instead of using flagsDamjan Marion1-4/+4
2018-09-26itf: dump interface rx-placementMohsin Kazmi1-1/+42
2018-09-24Trivial: Clean up some typos.Paul Vinciguerra1-1/+1
2018-08-24rx-placement: Add API call for interface rx-placementMohsin Kazmi1-0/+18
2018-07-20IP directed broadcastNeale Ranns1-0/+14
2018-06-11MTU: Software interface / Per-protocol MTU supportOle Troan1-3/+17
2018-04-13Revert "MTU: Setting of MTU on software interface (instead of hardware interf...Damjan Marion1-2/+2
2018-04-13MTU: Setting of MTU on software interface (instead of hardware interface)Ole Troan1-2/+2
2018-03-21Detailed Interface stats API takes sw_if_indexNeale Ranns1-1/+4
2018-03-19Interface Unicast, Multicast and Broadcast stats on the APINeale Ranns1-4/+40
2018-03-16IPv6 ND Router discovery control plane (VPP-1095)Juraj Sloboda1-1/+25
2018-03-06API: Add service definitions for events and singleton messages (second attempt)Marek Gradzki1-60/+5
2018-03-05Revert "API: Add service definitions for events and singleton messages."Ole Trøan1-5/+60
2018-03-05API: Add service definitions for events and singleton messages.Ole Troan1-60/+5
2018-01-23VPPAPIGEN: vppapigen replacement in Python PLY.Ole Troan1-1/+1
2017-11-10Break up vpe.apiNeale Ranns1-0/+137
2017-10-29devices: Add binary API for set interface <interface> rx-modeSteven1-0/+20
2017-10-09vppapigen: support per-file (major,minor,patch) version stampsDave Barach1-0/+2
2017-09-14Stats refactorKeith Burns (alagalah)1-2/+74
2017-08-16No context in SW interface eventNeale Ranns1-2/+4
2017-08-11Dedicated SW Interface EventNeale Ranns1-3/+16
2017-05-15Fix vnet_interface_counters API definitionAloys Augustin1-7/+28
2017-04-25"autoreply" flag: autogenerate standard xxx_reply_t messagesDave Barach1-99/+9
2017-04-03Add an API call to set interface MAC addresses.Jon Loeliger1-1/+25
2017-02-21VPP-540 : pbb tag rewrite detailsPavel Kotucek1-2/+17
2016-12-28Reorganize source tree to use single autotools instanceDamjan Marion1-0/+339
n>; vlib_buffer_t *b0; bier_bp_t fbs; int bucket;
/*
 * NOTE(review): the first line above is truncated by extraction — the
 * enclosing function header (the bier-lookup node dispatch function) and
 * the local declarations that precede "n>" (bi0, next0, bh0, bt0, bfm0,
 * bfmi0, bti0, n_bytes, index, num_buckets, bbs, buckets_copy, from,
 * to_next, n_left_from, n_left_to_next, next_index, thread_index, ...)
 * are outside this visible chunk. Confirm against the full file.
 */

/* take the next buffer index off the input vector */
bi0 = from[0];
from += 1;
n_left_from -= 1;

b0 = vlib_get_buffer (vm, bi0);
/* current data pointer is the BIER header */
bh0 = vlib_buffer_get_current (b0);
bti0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];

/*
 * default to drop so that if no bits are matched then
 * that is where we go - DROP.
 */
next0 = BIER_LOOKUP_NEXT_DROP;

/*
 * At the imposition or input node,
 * we stored the BIER Table index in the TX adjacency
 */
bt0 = bier_table_get(vnet_buffer(b0)->ip.adj_index[VLIB_TX]);

/*
 * we should only forward via one for the ECMP tables
 */
ASSERT(!bier_table_is_main(bt0));

/*
 * number of integer sized buckets
 */
n_bytes = bier_hdr_len_id_to_num_buckets(bt0->bt_id.bti_hdr_len);
vnet_buffer(b0)->mpls.bier.n_bytes = n_bytes;
/* no TX interface chosen yet - output/replication decides later */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = ~0;
num_buckets = n_bytes / sizeof(int);

/*
 * Work on a private copy of the packet's bit-string so bits can be
 * cleared as it is walked without modifying the packet itself.
 * (buckets_copy is declared before this view - presumably scratch
 * storage sized for the largest bit-string; confirm at the function
 * head.)
 */
bier_bit_string_init(&bbs,
                     bt0->bt_id.bti_hdr_len,
                     buckets_copy);
memcpy(bbs.bbs_buckets, bh0->bh_bit_string, bbs.bbs_len);

/*
 * reset the fmask and clone storage vectors
 */
vec_reset_length (blm->blm_fmasks[thread_index]);
vec_reset_length (blm->blm_clones[thread_index]);

/*
 * Loop through the buckets in the header
 */
for (index = 0; index < num_buckets; index++)
{
    /*
     * loop through each bit in the bucket
     */
    bucket = ((int*)bbs.bbs_buckets)[index];

    while (bucket)
    {
        /*
         * first set bit in this bucket, converted from bucket-relative
         * to an absolute bit-position in the bit-string
         */
        fbs = bier_find_first_bit_string_set(bucket);
        fbs += (((num_buckets - 1) - index) *
                BIER_BIT_MASK_BITS_PER_INT);

        /* look up the forwarding mask for this bit-position */
        bfmi0 = bier_table_fwd_lookup(bt0, fbs);

        /*
         * whatever happens, the bit we just looked for
         * MUST be cleared from the packet
         * otherwise we could be in this loop a while ...
         */
        bier_bit_string_clear_bit(&bbs, fbs);

        if (PREDICT_TRUE(INDEX_INVALID != bfmi0))
        {
            bfm0 = bier_fmask_get(bfmi0);

            /*
             * use the bit-string on the fmask to reset
             * the bits in the header we are walking
             */
            bier_bit_string_clear_string(
                &bfm0->bfm_bits.bfmb_input_reset_string,
                &bbs);
            /* reload the bucket - the clear above may have changed it */
            bucket = ((int*)bbs.bbs_buckets)[index];

            /*
             * the fmask is resolved so replicate a
             * packet its way
             */
            next0 = BIER_LOOKUP_NEXT_OUTPUT;

            vec_add1 (blm->blm_fmasks[thread_index], bfmi0);
        }
        else
        {
            /*
             * go to the next bit-position set
             */
            vlib_node_increment_counter(
                vm, node->node_index,
                BIER_LOOKUP_ERROR_FMASK_UNRES, 1);
            bucket = ((int*)bbs.bbs_buckets)[index];
            continue;
        }
    }
}

/*
 * Full mask now processed.
 * Create the number of clones we need based on the number
 * of fmasks we are sending to.
 */
u16 num_cloned, clone;
u32 n_clones;

n_clones = vec_len(blm->blm_fmasks[thread_index]);

if (PREDICT_TRUE(0 != n_clones))
{
    num_cloned = vlib_buffer_clone(vm, bi0,
                                   blm->blm_clones[thread_index],
                                   n_clones,
                                   VLIB_BUFFER_CLONE_HEAD_SIZE);

    /*
     * fewer clones than fmasks means buffer exhaustion; count it.
     * the loop below walks only the clones actually produced.
     */
    if (num_cloned != vec_len(blm->blm_fmasks[thread_index]))
    {
        vlib_node_increment_counter
            (vm, node->node_index,
             BIER_LOOKUP_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
    }

    for (clone = 0; clone < num_cloned; clone++)
    {
        vlib_buffer_t *c0;
        u32 ci0;

        ci0 = blm->blm_clones[thread_index][clone];
        c0 = vlib_get_buffer(vm, ci0);

        /* stash the fmask index for the downstream output node */
        vnet_buffer(c0)->ip.adj_index[VLIB_TX] =
            blm->blm_fmasks[thread_index][clone];

        to_next[0] = ci0;
        to_next += 1;
        n_left_to_next -= 1;

        if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
        {
            bier_lookup_trace_t *tr;

            /* the head clone shares the original's trace; copy the
             * flag onto the other clones so they are traced too */
            if (c0 != b0)
                vlib_buffer_copy_trace_flag (vm, b0, ci0);
            tr = vlib_add_trace (vm, node, c0, sizeof (*tr));
            tr->bt_index = bti0;
            tr->bfm_index = blm->blm_fmasks[thread_index][clone];
        }

        vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                        to_next, n_left_to_next,
                                        ci0, next0);

        /*
         * After the enqueue it is possible that we over-flow the
         * frame of the to-next node. When this happens we need to
         * 'put' that full frame to the node and get a fresh empty
         * one. Note that these are macros with side effects that
         * change to_next & n_left_to_next
         */
        if (PREDICT_FALSE(0 == n_left_to_next))
        {
            vlib_put_next_frame (vm, node, next_index,
                                 n_left_to_next);
            vlib_get_next_frame (vm, node, next_index,
                                 to_next, n_left_to_next);
        }
    }
}
else
{
    /*
     * no clones/replications required. drop this packet
     */
    next0 = BIER_LOOKUP_NEXT_DROP;
    to_next[0] = bi0;
    to_next += 1;
    n_left_to_next -= 1;

    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
    {
        bier_lookup_trace_t *tr;

        tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
        tr->bt_index = bti0;
        tr->bfm_index = ~0;
    }

    vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                    to_next, n_left_to_next,
                                    bi0, next0);
}
}

vlib_put_next_frame(vm, node, next_index, n_left_to_next);
}

vlib_node_increment_counter(vm, bier_lookup_node.index,
                            BIER_LOOKUP_ERROR_NONE,
                            from_frame->n_vectors);

return (from_frame->n_vectors);
}

/*
 * Format one bier-lookup trace record: the chosen next node, the BIER
 * table index used for the lookup and the fmask index (or ~0 on drop).
 */
static u8 *
format_bier_lookup_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    bier_lookup_trace_t * t = va_arg (*args, bier_lookup_trace_t *);

    s = format (s, "BIER: next [%d], tbl:%d BFM:%d",
                t->next_index,
                t->bt_index,
                t->bfm_index);
    return s;
}

/*
 * Graph-node registration for the BIER lookup/replication node.
 * Packets either get replicated towards resolved fmasks (bier-output)
 * or dropped (bier-drop).
 */
VLIB_REGISTER_NODE (bier_lookup_node) = {
    .function = bier_lookup,
    .name = "bier-lookup",
    /* Takes a vector of packets. */
    .vector_size = sizeof (u32),

    .n_errors = BIER_LOOKUP_N_ERROR,
    .error_strings = bier_lookup_error_strings,

    .format_trace = format_bier_lookup_trace,
    .n_next_nodes = BIER_LOOKUP_N_NEXT,
    .next_nodes = {
        [BIER_LOOKUP_NEXT_DROP] = "bier-drop",
        [BIER_LOOKUP_NEXT_OUTPUT] = "bier-output",
    },
};

/*
 * Module init: size the per-thread fmask and clone scratch vectors.
 * One outer entry per worker thread plus the main thread (hence the
 * inclusive <= vlib_num_workers() loop bound).
 */
clib_error_t *
bier_lookup_module_init (vlib_main_t * vm)
{
    bier_lookup_main_t *blm = &bier_lookup_main;
    u32 thread_index;

    vec_validate (blm->blm_clones, vlib_num_workers());
    vec_validate (blm->blm_fmasks, vlib_num_workers());

    for (thread_index = 0;
         thread_index <= vlib_num_workers();
         thread_index++)
    {
        /*
         * 1024 is the most we will ever need to support
         * a Bit-Mask length of 1024
         */
        vec_validate(blm->blm_fmasks[thread_index], 1023);
        vec_validate(blm->blm_clones[thread_index], 1023);
    }

    return 0;
}

VLIB_INIT_FUNCTION (bier_lookup_module_init);