From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/vnet/fib/fib_entry_src_rr.c | 293 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 src/vnet/fib/fib_entry_src_rr.c (limited to 'src/vnet/fib/fib_entry_src_rr.c') diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c new file mode 100644 index 00000000..ff15c54e --- /dev/null +++ b/src/vnet/fib/fib_entry_src_rr.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "fib_entry_src.h" +#include "fib_entry_cover.h" +#include "fib_entry.h" +#include "fib_table.h" + +/* + * fib_entry_src_rr_resolve_via_connected + * + * Resolve via a connected cover. + */ +static void +fib_entry_src_rr_resolve_via_connected (fib_entry_src_t *src, + const fib_entry_t *fib_entry, + const fib_entry_t *cover) +{ + const fib_route_path_t path = { + .frp_proto = fib_entry->fe_prefix.fp_proto, + .frp_addr = fib_entry->fe_prefix.fp_addr, + .frp_sw_if_index = fib_entry_get_resolving_interface( + fib_entry_get_index(cover)), + .frp_fib_index = ~0, + .frp_weight = 1, + }; + fib_route_path_t *paths = NULL; + vec_add1(paths, path); + + /* + * since the cover is connected, the address this entry corresponds + * to is a peer (ARP-able for) on the interface to which the cover is + * connected. The fact we resolve via the cover, just means this RR + * source is the first SRC to use said peer. The ARP source will be along + * shortly to over-rule this RR source. + */ + src->fes_pl = fib_path_list_create(FIB_PATH_LIST_FLAG_NONE, paths); + src->fes_entry_flags = fib_entry_get_flags(fib_entry_get_index(cover)); + + vec_free(paths); +} + + +/** + * Source initialisation Function + */ +static void +fib_entry_src_rr_init (fib_entry_src_t *src) +{ + src->rr.fesr_cover = FIB_NODE_INDEX_INVALID; + src->rr.fesr_sibling = FIB_NODE_INDEX_INVALID; +} + +/* + * Source activation. Called when the source is the new best source on the entry + */ +static int +fib_entry_src_rr_activate (fib_entry_src_t *src, + const fib_entry_t *fib_entry) +{ + fib_entry_t *cover; + + /* + * find the covering prefix. become a dependent thereof. + * for IP there should always be a cover, though it may be the default route. + * For MPLS there is never a cover. 
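+ * (in the MPLS case below a drop path-list is installed instead,
+ * since there is no cover from which to inherit forwarding)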
+ */ + if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto) + { + src->fes_pl = fib_path_list_create_special(FIB_PROTOCOL_MPLS, + FIB_PATH_LIST_FLAG_DROP, + NULL); + fib_path_list_lock(src->fes_pl); + return (!0); + } + + src->rr.fesr_cover = fib_table_get_less_specific(fib_entry->fe_fib_index, + &fib_entry->fe_prefix); + + ASSERT(FIB_NODE_INDEX_INVALID != src->rr.fesr_cover); + + cover = fib_entry_get(src->rr.fesr_cover); + + src->rr.fesr_sibling = + fib_entry_cover_track(cover, fib_entry_get_index(fib_entry)); + + /* + * if the ocver is attached then install an attached-host path + * (like an adj-fib). Otherwise inherit the forwarding from the cover + */ + if (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover)) + { + fib_entry_src_rr_resolve_via_connected(src, fib_entry, cover); + } + else + { + /* + * use the path-list of the cover, unless it would form a loop. + * that is unless the cover is via this entry. + * If a loop were to form it would be a 1 level loop (i.e. X via X), + * and there would be 2 locks on the path-list; one since its used + * by the cover, and 1 from here. The first lock will go when the + * cover is removed, the second, and last, when the covered walk + * occurs during the cover's removel - this is not a place where + * we can handle last lock gone. + * In short, don't let the loop form. The usual rules of 'we must + * let it form so we know when it breaks' don't apply here, since + * the loop will break when the cover changes, and this function + * will be called again when that happens. + */ + fib_node_index_t *entries = NULL; + fib_protocol_t proto; + + proto = fib_entry->fe_prefix.fp_proto; + vec_add1(entries, fib_entry_get_index(fib_entry)); + + if (fib_path_list_recursive_loop_detect(cover->fe_parent, + &entries)) + { + src->fes_pl = fib_path_list_create_special( + proto, + FIB_PATH_LIST_FLAG_DROP, + drop_dpo_get(fib_proto_to_dpo(proto))); + } + else + { + src->fes_pl = cover->fe_parent; + } + vec_free(entries); + + } + fib_path_list_lock(src->fes_pl); + + /* + * return go for install + */ + return (!0); +} + +/** + * Source Deactivate. + * Called when the source is no longer best source on the entry + */ +static void +fib_entry_src_rr_deactivate (fib_entry_src_t *src, + const fib_entry_t *fib_entry) +{ + fib_entry_t *cover; + + /* + * remove the depednecy on the covering entry + */ + if (FIB_NODE_INDEX_INVALID != src->rr.fesr_cover) + { + cover = fib_entry_get(src->rr.fesr_cover); + fib_entry_cover_untrack(cover, src->rr.fesr_sibling); + src->rr.fesr_cover = FIB_NODE_INDEX_INVALID; + } + + fib_path_list_unlock(src->fes_pl); + src->fes_pl = FIB_NODE_INDEX_INVALID; + src->fes_entry_flags = FIB_ENTRY_FLAG_NONE; +} + +static fib_entry_src_cover_res_t +fib_entry_src_rr_cover_change (fib_entry_src_t *src, + const fib_entry_t *fib_entry) +{ + fib_entry_src_cover_res_t res = { + .install = !0, + .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE, + }; + + if (FIB_NODE_INDEX_INVALID == src->rr.fesr_cover) + { + /* + * the source may be added, but it is not active + * if it is not tracking the cover. + */ + return (res); + } + + /* + * this function is called when this entry's cover has a more specific + * entry inserted benaeth it. That does not necessarily mean that this + * entry is covered by the new prefix. 
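+ * (e.g. when 10.1.0.0/16 is inserted beneath 10.0.0.0/8, an RR entry
+ * for 10.2.2.2/32 keeps the /8 as its best cover, whereas one for
+ * 10.1.1.1/32 must now track the /16)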
check that + */ + if (src->rr.fesr_cover != fib_table_get_less_specific(fib_entry->fe_fib_index, + &fib_entry->fe_prefix)) + { + fib_entry_src_rr_deactivate(src, fib_entry); + fib_entry_src_rr_activate(src, fib_entry); + + /* + * dependent children need to re-resolve to the new forwarding info + */ + res.bw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE; + } + return (res); +} + +/* + * fib_entry_src_rr_cover_update + * + * This entry's cover has updated its forwarding info. This entry + * will need to re-inheret. + */ +static fib_entry_src_cover_res_t +fib_entry_src_rr_cover_update (fib_entry_src_t *src, + const fib_entry_t *fib_entry) +{ + fib_entry_src_cover_res_t res = { + .install = !0, + .bw_reason = FIB_NODE_BW_REASON_FLAG_NONE, + }; + fib_node_index_t old_path_list; + fib_entry_t *cover; + + if (FIB_NODE_INDEX_INVALID == src->rr.fesr_cover) + { + /* + * the source may be added, but it is not active + * if it is not tracking the cover. + */ + return (res); + } + + cover = fib_entry_get(src->rr.fesr_cover); + old_path_list = src->fes_pl; + + /* + * if the ocver is attached then install an attached-host path + * (like an adj-fib). Otherwise inherit the forwarding from the cover + */ + if (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover)) + { + fib_entry_src_rr_resolve_via_connected(src, fib_entry, cover); + } + else + { + src->fes_pl = cover->fe_parent; + } + fib_path_list_lock(src->fes_pl); + fib_path_list_unlock(old_path_list); + + /* + * dependent children need to re-resolve to the new forwarding info + */ + res.bw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE; + + return (res); +} + +static u8* +fib_entry_src_rr_format (fib_entry_src_t *src, + u8* s) +{ + return (format(s, "cover:%d", src->rr.fesr_cover)); +} + +const static fib_entry_src_vft_t rr_src_vft = { + .fesv_init = fib_entry_src_rr_init, + .fesv_activate = fib_entry_src_rr_activate, + .fesv_deactivate = fib_entry_src_rr_deactivate, + .fesv_cover_change = fib_entry_src_rr_cover_change, + .fesv_cover_update = fib_entry_src_rr_cover_update, + .fesv_format = fib_entry_src_rr_format, +}; + +void +fib_entry_src_rr_register (void) +{ + fib_entry_src_register(FIB_SOURCE_RR, &rr_src_vft); + fib_entry_src_register(FIB_SOURCE_URPF_EXEMPT, &rr_src_vft); +} -- cgit 1.2.3-korg From f12a83f54ff2239d70494d577af3e1bb253692e1 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Tue, 18 Apr 2017 09:09:40 -0700 Subject: Improve Load-Balance MAPs - only build them for popular path-lists (where popular means more than 64 children) the reason to have a map is to improve convergence speed for recursive prefixes - if there are only a few this technique is not needed - only build them when there is at least one path that has recursive constraints, i.e. a path that can 'fail' in a PIC scenario. - Use the MAPS in the switch path. 
- PIC test cases for functionality (not convergence performance) Change-Id: I70705444c8469d22b07ae34be82cfb6a01358e10 Signed-off-by: Neale Ranns --- src/vnet/dpo/load_balance.c | 3 +- src/vnet/dpo/load_balance_map.h | 31 ++++ src/vnet/fib/fib_entry_src.c | 15 +- src/vnet/fib/fib_entry_src_rr.c | 2 +- src/vnet/fib/fib_path.c | 6 +- src/vnet/fib/fib_path.h | 2 +- src/vnet/fib/fib_path_list.c | 73 ++++++-- src/vnet/fib/fib_path_list.h | 24 +-- src/vnet/fib/fib_table.c | 12 +- src/vnet/fib/fib_test.c | 116 +++++++++++-- src/vnet/fib/fib_walk.c | 122 +++++++++++--- src/vnet/fib/fib_walk.h | 3 + src/vnet/ip/ip4_forward.c | 137 ++++++++------- src/vnet/ip/ip6_forward.c | 104 ++++++++---- src/vnet/ip/ip6_neighbor.c | 2 +- src/vnet/mpls/mpls_lookup.c | 118 ++++++++----- test/test_mpls.py | 359 ++++++++++++++++++++++++++++++++++++++++ test/vpp_ip_route.py | 53 ++++-- 18 files changed, 966 insertions(+), 216 deletions(-) (limited to 'src/vnet/fib/fib_entry_src_rr.c') diff --git a/src/vnet/dpo/load_balance.c b/src/vnet/dpo/load_balance.c index 6b0eda0e..af054f1c 100644 --- a/src/vnet/dpo/load_balance.c +++ b/src/vnet/dpo/load_balance.c @@ -118,7 +118,8 @@ load_balance_format (index_t lbi, buckets = load_balance_get_buckets(lb); s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE); - s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets); + s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto); + s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets); s = format(s, "uRPF:%d ", lb->lb_urpf); s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes); if (0 != via.packets) diff --git a/src/vnet/dpo/load_balance_map.h b/src/vnet/dpo/load_balance_map.h index 454bf4b3..237f24b0 100644 --- a/src/vnet/dpo/load_balance_map.h +++ b/src/vnet/dpo/load_balance_map.h @@ -73,6 +73,37 @@ load_balance_map_get (index_t lbmi) return (pool_elt_at_index(load_balance_map_pool, lbmi)); } +static inline u16 +load_balance_map_translate (index_t lbmi, + u16 bucket) +{ + load_balance_map_t*lbm; + + lbm = load_balance_map_get(lbmi); + + return (lbm->lbm_buckets[bucket]); +} + +static inline const dpo_id_t * +load_balance_get_fwd_bucket (const load_balance_t *lb, + u16 bucket) +{ + ASSERT(bucket < lb->lb_n_buckets); + + if (INDEX_INVALID != lb->lb_map) + { + bucket = load_balance_map_translate(lb->lb_map, bucket); + } + + if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb))) + { + return (&lb->lb_buckets_inline[bucket]); + } + else + { + return (&lb->lb_buckets[bucket]); + } +} extern void load_balance_map_module_init(void); diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c index a700282e..fd80497c 100644 --- a/src/vnet/fib/fib_entry_src.c +++ b/src/vnet/fib/fib_entry_src.c @@ -192,7 +192,7 @@ typedef struct fib_entry_src_collect_forwarding_ctx_t_ const fib_entry_t *fib_entry; const fib_entry_src_t *esrc; fib_forward_chain_type_t fct; - int is_recursive; + int n_recursive_constrained; } fib_entry_src_collect_forwarding_ctx_t; /** @@ -203,10 +203,11 @@ load_balance_flags_t fib_entry_calc_lb_flags (fib_entry_src_collect_forwarding_ctx_t *ctx) { /** - * We'll use a LB map is the path-list has recursive paths. + * We'll use a LB map if the path-list has multiple recursive paths. * recursive paths implies BGP, and hence scale. 
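+ * The map is only worth its switch-path cost when more than one path
+ * is recursively constrained (i.e. can fail in a PIC scenario) and the
+ * path-list is popular; both conditions are checked below.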
*/ - if (ctx->is_recursive) + if (ctx->n_recursive_constrained > 1 && + fib_path_list_is_popular(ctx->esrc->fes_pl)) { return (LOAD_BALANCE_FLAG_USES_MAP); } @@ -282,9 +283,9 @@ fib_entry_src_collect_forwarding (fib_node_index_t pl_index, return (!0); } - if (fib_path_is_recursive(path_index)) + if (fib_path_is_recursive_constrained(path_index)) { - ctx->is_recursive = 1; + ctx->n_recursive_constrained += 1; } /* @@ -397,7 +398,7 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry, .esrc = esrc, .fib_entry = fib_entry, .next_hops = NULL, - .is_recursive = 0, + .n_recursive_constrained = 0, .fct = fct, }; @@ -409,7 +410,7 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry, vec_validate(ctx.next_hops, fib_path_list_get_n_paths(esrc->fes_pl)); vec_reset_length(ctx.next_hops); - lb_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto); + lb_proto = fib_forw_chain_type_to_dpo_proto(fct); fib_path_list_walk(esrc->fes_pl, fib_entry_src_collect_forwarding, diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c index ff15c54e..c145aaa2 100644 --- a/src/vnet/fib/fib_entry_src_rr.c +++ b/src/vnet/fib/fib_entry_src_rr.c @@ -103,7 +103,7 @@ fib_entry_src_rr_activate (fib_entry_src_t *src, fib_entry_cover_track(cover, fib_entry_get_index(fib_entry)); /* - * if the ocver is attached then install an attached-host path + * if the cover is attached then install an attached-host path * (like an adj-fib). Otherwise inherit the forwarding from the cover */ if (FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags_i(cover)) diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 70c87905..889317fd 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -2025,13 +2025,15 @@ fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index, } int -fib_path_is_recursive (fib_node_index_t path_index) +fib_path_is_recursive_constrained (fib_node_index_t path_index) { fib_path_t *path; path = fib_path_get(path_index); - return (FIB_PATH_TYPE_RECURSIVE == path->fp_type); + return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) && + ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) || + (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST))); } int diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h index 334be6f5..b6bf1e4f 100644 --- a/src/vnet/fib/fib_path.h +++ b/src/vnet/fib/fib_path.h @@ -144,7 +144,7 @@ extern fib_node_index_t fib_path_copy(fib_node_index_t path_index, fib_node_index_t path_list_index); extern int fib_path_resolve(fib_node_index_t path_index); extern int fib_path_is_resolved(fib_node_index_t path_index); -extern int fib_path_is_recursive(fib_node_index_t path_index); +extern int fib_path_is_recursive_constrained(fib_node_index_t path_index); extern int fib_path_is_exclusive(fib_node_index_t path_index); extern int fib_path_is_deag(fib_node_index_t path_index); extern int fib_path_is_looped(fib_node_index_t path_index); diff --git a/src/vnet/fib/fib_path_list.c b/src/vnet/fib/fib_path_list.c index ea6565dd..64917f95 100644 --- a/src/vnet/fib/fib_path_list.c +++ b/src/vnet/fib/fib_path_list.c @@ -25,6 +25,16 @@ #include #include +/** + * The magic number of child entries that make a path-list popular. + * There's a trade-off here between convergnece and forwarding speed. + * Popular path-lists generate load-balance maps for the entires that + * use them. If the map is present there is a switch path cost to indirect + * through the map - this indirection provides the fast convergence - so + * without the map convergence is slower. 
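+ * For example, when thousands of recursive (BGP) prefixes share one
+ * popular path-list and a path through it fails, updating the shared
+ * map repairs forwarding for all of them at once, without waiting for
+ * each entry's load-balance to be rebuilt by the backwalk.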
+ */ +#define FIB_PATH_LIST_POPULAR 64 + /** * FIB path-list * A representation of the list/set of path trough which a prefix is reachable @@ -454,14 +464,7 @@ fib_path_list_back_walk (fib_node_index_t path_list_index, /* * propagate the backwalk further */ - if (32 >= fib_node_list_get_size(path_list->fpl_node.fn_children)) - { - /* - * only a few children. continue the walk synchronously - */ - fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, ctx); - } - else + if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_POPULAR) { /* * many children. schedule a async walk @@ -471,6 +474,13 @@ fib_path_list_back_walk (fib_node_index_t path_list_index, FIB_WALK_PRIORITY_LOW, ctx); } + else + { + /* + * only a few children. continue the walk synchronously + */ + fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, ctx); + } } /* @@ -625,6 +635,16 @@ fib_path_list_is_looped (fib_node_index_t path_list_index) return (path_list->fpl_flags & FIB_PATH_LIST_FLAG_LOOPED); } +int +fib_path_list_is_popular (fib_node_index_t path_list_index) +{ + fib_path_list_t *path_list; + + path_list = fib_path_list_get(path_list_index); + + return (path_list->fpl_flags & FIB_PATH_LIST_FLAG_POPULAR); +} + static fib_path_list_flags_t fib_path_list_flags_fixup (fib_path_list_flags_t flags) { @@ -807,6 +827,7 @@ fib_path_list_path_add (fib_node_index_t path_list_index, */ if (0 == fib_path_cmp(new_path_index, *orig_path_index)) { + fib_path_destroy(new_path_index); return (*orig_path_index); } } @@ -1173,10 +1194,38 @@ fib_path_list_child_add (fib_node_index_t path_list_index, fib_node_type_t child_type, fib_node_index_t child_index) { - return (fib_node_child_add(FIB_NODE_TYPE_PATH_LIST, - path_list_index, - child_type, - child_index)); + u32 sibling; + + sibling = fib_node_child_add(FIB_NODE_TYPE_PATH_LIST, + path_list_index, + child_type, + child_index); + + if (FIB_PATH_LIST_POPULAR == fib_node_get_n_children(FIB_NODE_TYPE_PATH_LIST, + path_list_index)) + { + /* + * Set the popular flag on the path-list once we pass the magic + * threshold. then walk children to update. + * We don't undo this action. The rational being that the number + * of entries using this prefix is large enough such that it is a + * non-trival amount of effort to converge them. If we get into the + * situation where we are adding and removing entries such that we + * flip-flop over the threshold, then this non-trivial work is added + * to each of those routes adds/deletes - not a situation we want. + */ + fib_node_back_walk_ctx_t ctx = { + .fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE, + }; + fib_path_list_t *path_list; + + path_list = fib_path_list_get(path_list_index); + path_list->fpl_flags |= FIB_PATH_LIST_FLAG_POPULAR; + + fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, &ctx); + } + + return (sibling); } void diff --git a/src/vnet/fib/fib_path_list.h b/src/vnet/fib/fib_path_list.h index 9d246211..376cb726 100644 --- a/src/vnet/fib/fib_path_list.h +++ b/src/vnet/fib/fib_path_list.h @@ -38,11 +38,6 @@ typedef enum fib_path_list_attribute_t_ { * be searched for each route update. */ FIB_PATH_LIST_ATTRIBUTE_SHARED = FIB_PATH_LIST_ATTRIBUTE_FIRST, - /** - * Indexed means the path-list keeps a hash table of all paths for - * fast lookup. The lookup result is the fib_node_index of the path. - */ - FIB_PATH_LIST_ATTRIBUTE_INDEXED, /** * explicit drop path-list. Used when the entry source needs to * force a drop, despite the fact the path info is present. @@ -65,6 +60,12 @@ typedef enum fib_path_list_attribute_t_ { * looped path-list. 
one path looped implies the whole list is */ FIB_PATH_LIST_ATTRIBUTE_LOOPED, + /** + * a popular path-ist is one that is shared amongst many entries. + * Path list become popular as they gain more children, but they + * don't become unpopular as they lose them. + */ + FIB_PATH_LIST_ATTRIBUTE_POPULAR, /** * no uRPF - do not generate unicast RPF list for this path-list */ @@ -72,30 +73,30 @@ typedef enum fib_path_list_attribute_t_ { /** * Marher. Add new flags before this one, and then update it. */ - FIB_PATH_LIST_ATTRIBUTE_LAST = FIB_PATH_LIST_ATTRIBUTE_LOOPED, + FIB_PATH_LIST_ATTRIBUTE_LAST = FIB_PATH_LIST_ATTRIBUTE_NO_URPF, } fib_path_list_attribute_t; typedef enum fib_path_list_flags_t_ { FIB_PATH_LIST_FLAG_NONE = 0, FIB_PATH_LIST_FLAG_SHARED = (1 << FIB_PATH_LIST_ATTRIBUTE_SHARED), - FIB_PATH_LIST_FLAG_INDEXED = (1 << FIB_PATH_LIST_ATTRIBUTE_INDEXED), FIB_PATH_LIST_FLAG_DROP = (1 << FIB_PATH_LIST_ATTRIBUTE_DROP), FIB_PATH_LIST_FLAG_LOCAL = (1 << FIB_PATH_LIST_ATTRIBUTE_LOCAL), FIB_PATH_LIST_FLAG_EXCLUSIVE = (1 << FIB_PATH_LIST_ATTRIBUTE_EXCLUSIVE), FIB_PATH_LIST_FLAG_RESOLVED = (1 << FIB_PATH_LIST_ATTRIBUTE_RESOLVED), FIB_PATH_LIST_FLAG_LOOPED = (1 << FIB_PATH_LIST_ATTRIBUTE_LOOPED), + FIB_PATH_LIST_FLAG_POPULAR = (1 << FIB_PATH_LIST_ATTRIBUTE_POPULAR), FIB_PATH_LIST_FLAG_NO_URPF = (1 << FIB_PATH_LIST_ATTRIBUTE_NO_URPF), } fib_path_list_flags_t; #define FIB_PATH_LIST_ATTRIBUTES { \ [FIB_PATH_LIST_ATTRIBUTE_SHARED] = "shared", \ - [FIB_PATH_LIST_ATTRIBUTE_INDEXED] = "indexed", \ [FIB_PATH_LIST_ATTRIBUTE_RESOLVED] = "resolved", \ [FIB_PATH_LIST_ATTRIBUTE_DROP] = "drop", \ [FIB_PATH_LIST_ATTRIBUTE_EXCLUSIVE] = "exclusive", \ - [FIB_PATH_LIST_ATTRIBUTE_LOCAL] = "local", \ - [FIB_PATH_LIST_ATTRIBUTE_LOOPED] = "looped", \ - [FIB_PATH_LIST_ATTRIBUTE_NO_URPF] = "no-uRPF", \ + [FIB_PATH_LIST_ATTRIBUTE_LOCAL] = "local", \ + [FIB_PATH_LIST_ATTRIBUTE_LOOPED] = "looped", \ + [FIB_PATH_LIST_ATTRIBUTE_POPULAR] = "popular", \ + [FIB_PATH_LIST_ATTRIBUTE_NO_URPF] = "no-uRPF", \ } #define FOR_EACH_PATH_LIST_ATTRIBUTE(_item) \ @@ -148,6 +149,7 @@ extern int fib_path_list_recursive_loop_detect(fib_node_index_t path_list_index, fib_node_index_t **entry_indicies); extern u32 fib_path_list_get_resolving_interface(fib_node_index_t path_list_index); extern int fib_path_list_is_looped(fib_node_index_t path_list_index); +extern int fib_path_list_is_popular(fib_node_index_t path_list_index); extern fib_protocol_t fib_path_list_get_proto(fib_node_index_t path_list_index); extern u8 * fib_path_list_format(fib_node_index_t pl_index, u8 * s); diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index 0938ce9b..ff428049 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -608,11 +608,19 @@ fib_table_entry_path_remove2 (u32 fib_index, fib_entry_src_flag_t src_flag; int was_sourced; - /* + /* + * if it's not sourced, then there's nowt to remove + */ + was_sourced = fib_entry_is_sourced(fib_entry_index, source); + if (!was_sourced) + { + return; + } + + /* * don't nobody go nowhere */ fib_entry_lock(fib_entry_index); - was_sourced = fib_entry_is_sourced(fib_entry_index, source); for (ii = 0; ii < vec_len(rpath); ii++) { diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index cbb5640a..d3bdfa35 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -729,6 +729,9 @@ fib_test_v4 (void) .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02), }; + FIB_TEST((0 == pool_elts(load_balance_map_pool)), "LB-map pool size is %d", + pool_elts(load_balance_map_pool)); + tm = 
&test_main; /* record the nubmer of load-balances in use before we start */ @@ -3090,6 +3093,43 @@ fib_test_v4 (void) NULL, FIB_ROUTE_PATH_RESOLVE_VIA_HOST); + /* + * add a bunch load more entries using this path combo so that we get + * an LB-map created. + */ +#define N_P 128 + fib_prefix_t bgp_78s[N_P]; + for (ii = 0; ii < N_P; ii++) + { + bgp_78s[ii].fp_len = 32; + bgp_78s[ii].fp_proto = FIB_PROTOCOL_IP4; + bgp_78s[ii].fp_addr.ip4.as_u32 = clib_host_to_net_u32(0x4e000000+ii); + + + fib_table_entry_path_add(fib_index, + &bgp_78s[ii], + FIB_SOURCE_API, + FIB_ENTRY_FLAG_NONE, + FIB_PROTOCOL_IP4, + &pfx_1_1_1_3_s_32.fp_addr, + ~0, + fib_index, + 1, + NULL, + FIB_ROUTE_PATH_RESOLVE_VIA_HOST); + fib_table_entry_path_add(fib_index, + &bgp_78s[ii], + FIB_SOURCE_API, + FIB_ENTRY_FLAG_NONE, + FIB_PROTOCOL_IP4, + &nh_1_1_1_1, + ~0, + fib_index, + 1, + NULL, + FIB_ROUTE_PATH_RESOLVE_VIA_HOST); + } + fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx); dpo = fib_entry_contribute_ip_forwarding(fei); @@ -3138,6 +3178,9 @@ fib_test_v4 (void) 1, FIB_ROUTE_PATH_FLAG_NONE); + /* suspend so the update walk kicks int */ + vlib_process_suspend(vlib_get_main(), 1e-5); + fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx); FIB_TEST(!dpo_cmp(dpo, fib_entry_contribute_ip_forwarding(fei)), "post PIC 200.200.200.200/32 was inplace modified"); @@ -3175,6 +3218,9 @@ fib_test_v4 (void) NULL, FIB_ROUTE_PATH_FLAG_NONE); + /* suspend so the update walk kicks in */ + vlib_process_suspend(vlib_get_main(), 1e-5); + FIB_TEST(!dpo_cmp(dpo2, load_balance_get_bucket_i(lb, 0)), "post PIC recovery adj for 200.200.200.200/32 is recursive " "via adj for 1.1.1.1"); @@ -3201,6 +3247,20 @@ fib_test_v4 (void) 1, NULL, FIB_ROUTE_PATH_RESOLVE_VIA_HOST); + for (ii = 0; ii < N_P; ii++) + { + fib_table_entry_path_add(fib_index, + &bgp_78s[ii], + FIB_SOURCE_API, + FIB_ENTRY_FLAG_NONE, + FIB_PROTOCOL_IP4, + &pfx_1_1_1_2_s_32.fp_addr, + ~0, + fib_index, + 1, + NULL, + FIB_ROUTE_PATH_RESOLVE_VIA_HOST); + } fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx); dpo = fib_entry_contribute_ip_forwarding(fei); @@ -3233,6 +3293,8 @@ fib_test_v4 (void) ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); + /* suspend so the update walk kicks int */ + vlib_process_suspend(vlib_get_main(), 1e-5); fei = fib_table_lookup_exact_match(fib_index, &bgp_200_pfx); dpo = fib_entry_contribute_ip_forwarding(fei); @@ -3270,6 +3332,16 @@ fib_test_v4 (void) NULL, FIB_ROUTE_PATH_FLAG_NONE); + for (ii = 0; ii < N_P; ii++) + { + fib_table_entry_delete(fib_index, + &bgp_78s[ii], + FIB_SOURCE_API); + FIB_TEST((FIB_NODE_INDEX_INVALID == + fib_table_lookup_exact_match(fib_index, &bgp_78s[ii])), + "%U removed", + format_fib_prefix, &bgp_78s[ii]); + } fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, @@ -3303,6 +3375,8 @@ fib_test_v4 (void) fib_table_entry_delete(fib_index, &pfx_1_1_1_0_s_28, FIB_SOURCE_API); + /* suspend so the update walk kicks int */ + vlib_process_suspend(vlib_get_main(), 1e-5); FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_0_s_28)), "1.1.1.1/28 removed"); @@ -3821,7 +3895,7 @@ fib_test_v4 (void) /* * -2 entries and -2 non-shared path-list */ - FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d", + FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d", fib_path_list_db_size()); FIB_TEST((PNBR == fib_path_list_pool_size()), "path list pool size is %d", fib_path_list_pool_size()); @@ -3855,7 +3929,7 @@ fib_test_v4 (void) FIB_TEST((ENBR-5 == 
pool_elts(fib_urpf_list_pool)), "uRPF pool size is %d", pool_elts(fib_urpf_list_pool)); FIB_TEST((0 == pool_elts(load_balance_map_pool)), "LB-map pool size is %d", - pool_elts(load_balance_map_pool)); + pool_elts(load_balance_map_pool)); FIB_TEST((lb_count == pool_elts(load_balance_pool)), "LB pool size is %d", pool_elts(load_balance_pool)); @@ -5900,6 +5974,12 @@ fib_test_label (void) .adj = DPO_PROTO_IP4, }, }; + fib_test_lb_bucket_t mpls_bucket_drop = { + .type = FT_LB_SPECIAL, + .special = { + .adj = DPO_PROTO_MPLS, + }, + }; fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, @@ -5932,9 +6012,9 @@ fib_test_label (void) &pfx_24001_neos); FIB_TEST(fib_test_validate_entry(fei, FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, - 1, - &bucket_drop), - "24001/eos LB 1 buckets via: DROP"); + 1, + &mpls_bucket_drop), + "24001/neos LB 1 buckets via: DROP"); /* * add back the path with the valid label @@ -7707,6 +7787,12 @@ lfib_test (void) * A recursive via a label that does not exist */ fib_test_lb_bucket_t bucket_drop = { + .type = FT_LB_SPECIAL, + .special = { + .adj = DPO_PROTO_IP4, + }, + }; + fib_test_lb_bucket_t mpls_bucket_drop = { .type = FT_LB_SPECIAL, .special = { .adj = DPO_PROTO_MPLS, @@ -7735,7 +7821,12 @@ lfib_test (void) FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1, &bucket_drop), - "2.2.2.4/32 LB 1 buckets via: ip4-DROP"); + "1200/neos LB 1 buckets via: ip4-DROP"); + FIB_TEST(fib_test_validate_entry(lfe, + FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, + 1, + &mpls_bucket_drop), + "1200/neos LB 1 buckets via: mpls-DROP"); fib_table_entry_delete(fib_index, &pfx_2_2_2_4_s_32, FIB_SOURCE_API); @@ -7940,18 +8031,19 @@ fib_test (vlib_main_t * vm, } else { - /* - * These walk UT aren't run as part of the full suite, since the - * fib-walk process must be disabled in order for the tests to work - * - * fib_test_walk(); - */ res += fib_test_v4(); res += fib_test_v6(); res += fib_test_ae(); res += fib_test_bfd(); res += fib_test_label(); res += lfib_test(); + + /* + * fib-walk process must be disabled in order for the walk tests to work + */ + fib_walk_process_disable(); + res += fib_test_walk(); + fib_walk_process_enable(); } if (res) diff --git a/src/vnet/fib/fib_walk.c b/src/vnet/fib/fib_walk.c index 938f7b8c..c570476d 100644 --- a/src/vnet/fib/fib_walk.c +++ b/src/vnet/fib/fib_walk.c @@ -95,11 +95,6 @@ typedef struct fib_walk_t_ */ static fib_walk_t *fib_walk_pool; -/** - * @brief There's only one event type sent to the walk process - */ -#define FIB_WALK_EVENT 0 - /** * Statistics maintained per-walk queue */ @@ -240,10 +235,13 @@ fib_walk_queue_get_front (fib_walk_priority_t prio) } static void -fib_walk_destroy (fib_walk_t *fwalk) +fib_walk_destroy (index_t fwi) { + fib_walk_t *fwalk; u32 bucket, ii; + fwalk = fib_walk_get(fwi); + if (FIB_NODE_INDEX_INVALID != fwalk->fw_prio_sibling) { fib_node_list_elt_remove(fwalk->fw_prio_sibling); @@ -252,6 +250,12 @@ fib_walk_destroy (fib_walk_t *fwalk) fwalk->fw_parent.fnp_index, fwalk->fw_dep_sibling); + /* + * refetch the walk object. More walks could have been spawned as a result + * of releasing the lock on the parent. + */ + fwalk = fib_walk_get(fwi); + /* * add the stats to the continuous histogram collection. 
*/ @@ -466,8 +470,7 @@ fib_walk_process_queues (vlib_main_t * vm, */ if (FIB_WALK_ADVANCE_MORE != rc) { - fwalk = fib_walk_get(fwi); - fib_walk_destroy(fwalk); + fib_walk_destroy(fwi); fib_walk_queues.fwqs_queues[prio].fwq_stats[FIB_WALK_COMPLETED]++; } else @@ -510,6 +513,16 @@ that_will_do_for_now: return (fib_walk_sleep_duration[sleep]); } +/** + * Events sent to the FIB walk process + */ +typedef enum fib_walk_process_event_t_ +{ + FIB_WALK_PROCESS_EVENT_DATA, + FIB_WALK_PROCESS_EVENT_ENABLE, + FIB_WALK_PROCESS_EVENT_DISABLE, +} fib_walk_process_event; + /** * @brief The 'fib-walk' process's main loop. */ @@ -518,22 +531,47 @@ fib_walk_process (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f) { + uword event_type, *event_data = 0; f64 sleep_time; + int enabled; + enabled = 1; sleep_time = fib_walk_sleep_duration[FIB_WALK_SHORT_SLEEP]; while (1) { - vlib_process_wait_for_event_or_clock(vm, sleep_time); + /* + * the feature to disable/enable this walk process is only + * for testing purposes + */ + if (enabled) + { + vlib_process_wait_for_event_or_clock(vm, sleep_time); + } + else + { + vlib_process_wait_for_event(vm); + } - /* - * there may be lots of event queued between the processes, - * but the walks we want to schedule are in the priority queues, - * so we ignore the process events. - */ - vlib_process_get_events(vm, NULL); + event_type = vlib_process_get_events(vm, &event_data); + vec_reset_length(event_data); + + switch (event_type) + { + case FIB_WALK_PROCESS_EVENT_ENABLE: + enabled = 1; + break; + case FIB_WALK_PROCESS_EVENT_DISABLE: + enabled = 0; + break; + default: + break; + } - sleep_time = fib_walk_process_queues(vm, quota); + if (enabled) + { + sleep_time = fib_walk_process_queues(vm, quota); + } } /* @@ -610,8 +648,8 @@ fib_walk_prio_queue_enquue (fib_walk_priority_t prio, */ vlib_process_signal_event(vlib_get_main(), fib_walk_process_node.index, - FIB_WALK_EVENT, - FIB_WALK_EVENT); + FIB_WALK_PROCESS_EVENT_DATA, + 0); return (sibling); } @@ -742,7 +780,7 @@ fib_walk_sync (fib_node_type_t parent_type, ASSERT(FIB_NODE_INDEX_INVALID != merged_walk.fnp_index); ASSERT(FIB_NODE_TYPE_WALK == merged_walk.fnp_type); - fib_walk_destroy(fwalk); + fib_walk_destroy(fwi); fwi = merged_walk.fnp_index; fwalk = fib_walk_get(fwi); @@ -774,7 +812,7 @@ fib_walk_sync (fib_node_type_t parent_type, if (NULL != fwalk) { - fib_walk_destroy(fwalk); + fib_walk_destroy(fwi); } } @@ -1106,3 +1144,47 @@ VLIB_CLI_COMMAND (fib_walk_clear_command, static) = { .short_help = "clear fib walk", .function = fib_walk_clear, }; + +void +fib_walk_process_enable (void) +{ + vlib_process_signal_event(vlib_get_main(), + fib_walk_process_node.index, + FIB_WALK_PROCESS_EVENT_ENABLE, + 0); +} + +void +fib_walk_process_disable (void) +{ + vlib_process_signal_event(vlib_get_main(), + fib_walk_process_node.index, + FIB_WALK_PROCESS_EVENT_DISABLE, + 0); +} + +static clib_error_t * +fib_walk_process_enable_disable (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + if (unformat (input, "enable")) + { + fib_walk_process_enable(); + } + else if (unformat (input, "disable")) + { + fib_walk_process_disable(); + } + else + { + return clib_error_return(0, "choose enable or disable"); + } + return (NULL); +} + +VLIB_CLI_COMMAND (fib_walk_process_command, static) = { + .path = "test fib-walk-process", + .short_help = "test fib-walk-process [enable|disable]", + .function = fib_walk_process_enable_disable, +}; diff --git a/src/vnet/fib/fib_walk.h b/src/vnet/fib/fib_walk.h index 
7413d8a2..fdf2f10c 100644 --- a/src/vnet/fib/fib_walk.h +++ b/src/vnet/fib/fib_walk.h @@ -54,5 +54,8 @@ extern void fib_walk_sync(fib_node_type_t parent_type, extern u8* format_fib_walk_priority(u8 *s, va_list ap); +extern void fib_walk_process_enable(void); +extern void fib_walk_process_disable(void); + #endif diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 0f562037..697d2169 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -49,6 +49,7 @@ #include /* for FIB uRPF check */ #include #include +#include #include #include /* for mFIB table and entry creation */ @@ -89,7 +90,6 @@ ip4_lookup_inline (vlib_main_t * vm, { vlib_buffer_t *p0, *p1, *p2, *p3; ip4_header_t *ip0, *ip1, *ip2, *ip3; - __attribute__ ((unused)) tcp_header_t *tcp0, *tcp1, *tcp2, *tcp3; ip_lookup_next_t next0, next1, next2, next3; const load_balance_t *lb0, *lb1, *lb2, *lb3; ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3; @@ -188,11 +188,6 @@ ip4_lookup_inline (vlib_main_t * vm, leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3); } - tcp0 = (void *) (ip0 + 1); - tcp1 = (void *) (ip1 + 1); - tcp2 = (void *) (ip2 + 1); - tcp3 = (void *) (ip3 + 1); - if (!lookup_for_responses_to_locally_received_packets) { leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); @@ -230,6 +225,15 @@ ip4_lookup_inline (vlib_main_t * vm, lb2 = load_balance_get (lb_index2); lb3 = load_balance_get (lb_index3); + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); + ASSERT (lb1->lb_n_buckets > 0); + ASSERT (is_pow2 (lb1->lb_n_buckets)); + ASSERT (lb2->lb_n_buckets > 0); + ASSERT (is_pow2 (lb2->lb_n_buckets)); + ASSERT (lb3->lb_n_buckets > 0); + ASSERT (is_pow2 (lb3->lb_n_buckets)); + /* Use flow hash to compute multipath adjacency. 
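+ The hash selects a bucket; load_balance_get_fwd_bucket() translates
+ it through the load-balance map, when one is present, so traffic is
+ steered away from buckets whose paths have failed.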
*/ hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0; hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0; @@ -240,47 +244,57 @@ ip4_lookup_inline (vlib_main_t * vm, flow_hash_config0 = lb0->lb_hash_config; hash_c0 = vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash (ip0, flow_hash_config0); + dpo0 = + load_balance_get_fwd_bucket (lb0, + (hash_c0 & + (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) { flow_hash_config1 = lb1->lb_hash_config; hash_c1 = vnet_buffer (p1)->ip.flow_hash = ip4_compute_flow_hash (ip1, flow_hash_config1); + dpo1 = + load_balance_get_fwd_bucket (lb1, + (hash_c1 & + (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } if (PREDICT_FALSE (lb2->lb_n_buckets > 1)) { flow_hash_config2 = lb2->lb_hash_config; hash_c2 = vnet_buffer (p2)->ip.flow_hash = ip4_compute_flow_hash (ip2, flow_hash_config2); + dpo2 = + load_balance_get_fwd_bucket (lb2, + (hash_c2 & + (lb2->lb_n_buckets_minus_1))); + } + else + { + dpo2 = load_balance_get_bucket_i (lb2, 0); } if (PREDICT_FALSE (lb3->lb_n_buckets > 1)) { flow_hash_config3 = lb3->lb_hash_config; hash_c3 = vnet_buffer (p3)->ip.flow_hash = ip4_compute_flow_hash (ip3, flow_hash_config3); + dpo3 = + load_balance_get_fwd_bucket (lb3, + (hash_c3 & + (lb3->lb_n_buckets_minus_1))); + } + else + { + dpo3 = load_balance_get_bucket_i (lb3, 0); } - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - ASSERT (lb1->lb_n_buckets > 0); - ASSERT (is_pow2 (lb1->lb_n_buckets)); - ASSERT (lb2->lb_n_buckets > 0); - ASSERT (is_pow2 (lb2->lb_n_buckets)); - ASSERT (lb3->lb_n_buckets > 0); - ASSERT (is_pow2 (lb3->lb_n_buckets)); - - dpo0 = load_balance_get_bucket_i (lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); - dpo1 = load_balance_get_bucket_i (lb1, - (hash_c1 & - (lb1->lb_n_buckets_minus_1))); - dpo2 = load_balance_get_bucket_i (lb2, - (hash_c2 & - (lb2->lb_n_buckets_minus_1))); - dpo3 = load_balance_get_bucket_i (lb3, - (hash_c3 & - (lb3->lb_n_buckets_minus_1))); next0 = dpo0->dpoi_next_node; vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; @@ -293,20 +307,16 @@ ip4_lookup_inline (vlib_main_t * vm, vlib_increment_combined_counter (cm, thread_index, lb_index0, 1, - vlib_buffer_length_in_chain (vm, p0) - + sizeof (ethernet_header_t)); + vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter (cm, thread_index, lb_index1, 1, - vlib_buffer_length_in_chain (vm, p1) - + sizeof (ethernet_header_t)); + vlib_buffer_length_in_chain (vm, p1)); vlib_increment_combined_counter (cm, thread_index, lb_index2, 1, - vlib_buffer_length_in_chain (vm, p2) - + sizeof (ethernet_header_t)); + vlib_buffer_length_in_chain (vm, p2)); vlib_increment_combined_counter (cm, thread_index, lb_index3, 1, - vlib_buffer_length_in_chain (vm, p3) - + sizeof (ethernet_header_t)); + vlib_buffer_length_in_chain (vm, p3)); vlib_validate_buffer_enqueue_x4 (vm, node, next, to_next, n_left_to_next, @@ -318,7 +328,6 @@ ip4_lookup_inline (vlib_main_t * vm, { vlib_buffer_t *p0; ip4_header_t *ip0; - __attribute__ ((unused)) tcp_header_t *tcp0; ip_lookup_next_t next0; const load_balance_t *lb0; ip4_fib_mtrie_t *mtrie0; @@ -352,8 +361,6 @@ ip4_lookup_inline (vlib_main_t * vm, leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0); } - tcp0 = (void *) (ip0 + 1); - if (!lookup_for_responses_to_locally_received_packets) leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); @@ -371,6 +378,9 @@ ip4_lookup_inline (vlib_main_t * 
vm, ASSERT (lbi0); lb0 = load_balance_get (lbi0); + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); + /* Use flow hash to compute multipath adjacency. */ hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0; if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) @@ -379,20 +389,22 @@ ip4_lookup_inline (vlib_main_t * vm, hash_c0 = vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash (ip0, flow_hash_config0); + dpo0 = + load_balance_get_fwd_bucket (lb0, + (hash_c0 & + (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - - dpo0 = load_balance_get_bucket_i (lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); next0 = dpo0->dpoi_next_node; vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - vlib_increment_combined_counter - (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + vlib_increment_combined_counter (cm, thread_index, lbi0, 1, + vlib_buffer_length_in_chain (vm, + p0)); from += 1; to_next += 1; @@ -555,6 +567,12 @@ ip4_load_balance (vlib_main_t * vm, hc0 = vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash (ip0, lb0->lb_hash_config); } + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) { @@ -568,14 +586,13 @@ ip4_load_balance (vlib_main_t * vm, hc1 = vnet_buffer (p1)->ip.flow_hash = ip4_compute_flow_hash (ip1, lb1->lb_hash_config); } + dpo1 = load_balance_get_fwd_bucket + (lb1, (hc1 & (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } - - dpo0 = - load_balance_get_bucket_i (lb0, - hc0 & (lb0->lb_n_buckets_minus_1)); - dpo1 = - load_balance_get_bucket_i (lb1, - hc1 & (lb1->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; next1 = dpo1->dpoi_next_node; @@ -629,11 +646,13 @@ ip4_load_balance (vlib_main_t * vm, hc0 = vnet_buffer (p0)->ip.flow_hash = ip4_compute_flow_hash (ip0, lb0->lb_hash_config); } + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - - dpo0 = - load_balance_get_bucket_i (lb0, - hc0 & (lb0->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index 98bfd4d1..3bc07d0e 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -45,7 +45,7 @@ #include /* for FIB uRPF check */ #include #include -#include +#include #include #include @@ -138,6 +138,10 @@ ip6_lookup_inline (vlib_main_t * vm, lb0 = load_balance_get (lbi0); lb1 = load_balance_get (lbi1); + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (lb1->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); + ASSERT (is_pow2 (lb1->lb_n_buckets)); vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0; @@ -146,25 +150,29 @@ ip6_lookup_inline (vlib_main_t * vm, flow_hash_config0 = lb0->lb_hash_config; vnet_buffer (p0)->ip.flow_hash = ip6_compute_flow_hash (ip0, flow_hash_config0); + dpo0 = + load_balance_get_fwd_bucket (lb0, + (vnet_buffer (p0)->ip.flow_hash & + (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) { flow_hash_config1 = lb1->lb_hash_config; vnet_buffer (p1)->ip.flow_hash = ip6_compute_flow_hash (ip1, flow_hash_config1); + dpo1 = + load_balance_get_fwd_bucket 
(lb1, + (vnet_buffer (p1)->ip.flow_hash & + (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (lb1->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - ASSERT (is_pow2 (lb1->lb_n_buckets)); - dpo0 = load_balance_get_bucket_i (lb0, - (vnet_buffer (p0)->ip.flow_hash & - lb0->lb_n_buckets_minus_1)); - dpo1 = load_balance_get_bucket_i (lb1, - (vnet_buffer (p1)->ip.flow_hash & - lb1->lb_n_buckets_minus_1)); - next0 = dpo0->dpoi_next_node; next1 = dpo1->dpoi_next_node; @@ -266,16 +274,24 @@ ip6_lookup_inline (vlib_main_t * vm, lb0 = load_balance_get (lbi0); vnet_buffer (p0)->ip.flow_hash = 0; + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) { flow_hash_config0 = lb0->lb_hash_config; vnet_buffer (p0)->ip.flow_hash = ip6_compute_flow_hash (ip0, flow_hash_config0); + dpo0 = + load_balance_get_fwd_bucket (lb0, + (vnet_buffer (p0)->ip.flow_hash & + (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); dpo0 = load_balance_get_bucket_i (lb0, (vnet_buffer (p0)->ip.flow_hash & lb0->lb_n_buckets_minus_1)); @@ -337,10 +353,18 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, { fib_node_index_t fei; - fei = fib_table_entry_update_one_path (fib_index, &pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), FIB_PROTOCOL_IP6, NULL, /* No next-hop address */ - sw_if_index, ~0, // invalid FIB index - 1, NULL, // no label stack - FIB_ROUTE_PATH_FLAG_NONE); + fei = fib_table_entry_update_one_path (fib_index, + &pfx, + FIB_SOURCE_INTERFACE, + (FIB_ENTRY_FLAG_CONNECTED | + FIB_ENTRY_FLAG_ATTACHED), + FIB_PROTOCOL_IP6, + /* No next-hop address */ + NULL, sw_if_index, + /* invalid FIB index */ + ~0, 1, + /* no label stack */ + NULL, FIB_ROUTE_PATH_FLAG_NONE); a->neighbor_probe_adj_index = fib_entry_get_adj (fei); } @@ -366,7 +390,13 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, } } - fib_table_entry_update_one_path (fib_index, &pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), FIB_PROTOCOL_IP6, &pfx.fp_addr, sw_if_index, ~0, // invalid FIB index + fib_table_entry_update_one_path (fib_index, &pfx, + FIB_SOURCE_INTERFACE, + (FIB_ENTRY_FLAG_CONNECTED | + FIB_ENTRY_FLAG_LOCAL), + FIB_PROTOCOL_IP6, + &pfx.fp_addr, + sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } @@ -780,6 +810,14 @@ ip6_load_balance (vlib_main_t * vm, hc0 = vnet_buffer (p0)->ip.flow_hash = ip6_compute_flow_hash (ip0, lb0->lb_hash_config); } + dpo0 = + load_balance_get_fwd_bucket (lb0, + (hc0 & + lb0->lb_n_buckets_minus_1)); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) { @@ -793,14 +831,15 @@ ip6_load_balance (vlib_main_t * vm, hc1 = vnet_buffer (p1)->ip.flow_hash = ip6_compute_flow_hash (ip1, lb1->lb_hash_config); } + dpo1 = + load_balance_get_fwd_bucket (lb1, + (hc1 & + lb1->lb_n_buckets_minus_1)); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } - - dpo0 = - load_balance_get_bucket_i (lb0, - hc0 & (lb0->lb_n_buckets_minus_1)); - dpo1 = - load_balance_get_bucket_i (lb1, - hc1 & (lb1->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; next1 = dpo1->dpoi_next_node; @@ -869,10 +908,15 @@ ip6_load_balance (vlib_main_t * vm, hc0 = vnet_buffer (p0)->ip.flow_hash = ip6_compute_flow_hash (ip0, lb0->lb_hash_config); } + 
dpo0 = + load_balance_get_fwd_bucket (lb0, + (hc0 & + lb0->lb_n_buckets_minus_1)); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - dpo0 = - load_balance_get_bucket_i (lb0, - hc0 & (lb0->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index 31182770..ee80ee3d 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -630,7 +630,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, n->fib_entry_index = fib_table_entry_update_one_path (fib_index, &pfx, FIB_SOURCE_ADJ, - FIB_ENTRY_FLAG_NONE, + FIB_ENTRY_FLAG_ATTACHED, FIB_PROTOCOL_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c index 3c6be7e8..4b8a3eef 100644 --- a/src/vnet/mpls/mpls_lookup.c +++ b/src/vnet/mpls/mpls_lookup.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include /** @@ -47,7 +47,7 @@ format_mpls_lookup_trace (u8 * s, va_list * args) CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *); - s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d" + s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %#x " "label %d eos %d", t->next_index, t->lfib_index, t->lb_index, t->hash, vnet_mpls_uc_get_label( @@ -64,8 +64,15 @@ always_inline u32 mpls_compute_flow_hash (const mpls_unicast_header_t * hdr, flow_hash_config_t flow_hash_config) { - // FIXME - return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl)); + /* + * improve this to include: + * - all labels in the stack. + * - recognise entropy labels. + * + * We need to byte swap so we use the numerical value. i.e. an odd label + * leads to an odd bucket. ass opposed to a label above and below value X. 
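+ * e.g. with a 2-bucket load-balance the bucket is (hash & 1), so
+ * labels 33 and 34 land in different buckets once the label has been
+ * byte-swapped to host order.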
+ */ + return (vnet_mpls_uc_get_label(clib_net_to_host_u32(hdr->label_exp_s_ttl))); } static inline uword @@ -179,17 +186,21 @@ mpls_lookup (vlib_main_t * vm, else { lb0 = load_balance_get(lbi0); + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); if (PREDICT_FALSE(lb0->lb_n_buckets > 1)) { hash_c0 = vnet_buffer (b0)->ip.flow_hash = mpls_compute_flow_hash(h0, lb0->lb_hash_config); + dpo0 = load_balance_get_fwd_bucket + (lb0, + (hash_c0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - dpo0 = load_balance_get_bucket_i(lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); next0 = dpo0->dpoi_next_node; vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; @@ -207,17 +218,21 @@ mpls_lookup (vlib_main_t * vm, else { lb1 = load_balance_get(lbi1); + ASSERT (lb1->lb_n_buckets > 0); + ASSERT (is_pow2 (lb1->lb_n_buckets)); if (PREDICT_FALSE(lb1->lb_n_buckets > 1)) { hash_c1 = vnet_buffer (b1)->ip.flow_hash = mpls_compute_flow_hash(h1, lb1->lb_hash_config); + dpo1 = load_balance_get_fwd_bucket + (lb1, + (hash_c1 & (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } - ASSERT (lb1->lb_n_buckets > 0); - ASSERT (is_pow2 (lb1->lb_n_buckets)); - dpo1 = load_balance_get_bucket_i(lb1, - (hash_c1 & - (lb1->lb_n_buckets_minus_1))); next1 = dpo1->dpoi_next_node; vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; @@ -235,17 +250,21 @@ mpls_lookup (vlib_main_t * vm, else { lb2 = load_balance_get(lbi2); + ASSERT (lb2->lb_n_buckets > 0); + ASSERT (is_pow2 (lb2->lb_n_buckets)); if (PREDICT_FALSE(lb2->lb_n_buckets > 1)) { hash_c2 = vnet_buffer (b2)->ip.flow_hash = mpls_compute_flow_hash(h2, lb2->lb_hash_config); + dpo2 = load_balance_get_fwd_bucket + (lb2, + (hash_c2 & (lb2->lb_n_buckets_minus_1))); + } + else + { + dpo2 = load_balance_get_bucket_i (lb2, 0); } - ASSERT (lb2->lb_n_buckets > 0); - ASSERT (is_pow2 (lb2->lb_n_buckets)); - dpo2 = load_balance_get_bucket_i(lb2, - (hash_c2 & - (lb2->lb_n_buckets_minus_1))); next2 = dpo2->dpoi_next_node; vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index; @@ -263,17 +282,21 @@ mpls_lookup (vlib_main_t * vm, else { lb3 = load_balance_get(lbi3); + ASSERT (lb3->lb_n_buckets > 0); + ASSERT (is_pow2 (lb3->lb_n_buckets)); if (PREDICT_FALSE(lb3->lb_n_buckets > 1)) { hash_c3 = vnet_buffer (b3)->ip.flow_hash = mpls_compute_flow_hash(h3, lb3->lb_hash_config); + dpo3 = load_balance_get_fwd_bucket + (lb3, + (hash_c3 & (lb3->lb_n_buckets_minus_1))); + } + else + { + dpo3 = load_balance_get_bucket_i (lb3, 0); } - ASSERT (lb3->lb_n_buckets > 0); - ASSERT (is_pow2 (lb3->lb_n_buckets)); - dpo3 = load_balance_get_bucket_i(lb3, - (hash_c3 & - (lb3->lb_n_buckets_minus_1))); next3 = dpo3->dpoi_next_node; vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; @@ -393,20 +416,21 @@ mpls_lookup (vlib_main_t * vm, else { lb0 = load_balance_get(lbi0); + ASSERT (lb0->lb_n_buckets > 0); + ASSERT (is_pow2 (lb0->lb_n_buckets)); if (PREDICT_FALSE(lb0->lb_n_buckets > 1)) { hash_c0 = vnet_buffer (b0)->ip.flow_hash = mpls_compute_flow_hash(h0, lb0->lb_hash_config); + dpo0 = load_balance_get_fwd_bucket + (lb0, + (hash_c0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - - dpo0 = load_balance_get_bucket_i(lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); - next0 = 
dpo0->dpoi_next_node; vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; @@ -467,7 +491,7 @@ VLIB_REGISTER_NODE (mpls_lookup_node, static) = { .n_errors = MPLS_N_ERROR, .error_strings = mpls_error_strings, - .sibling_of = "ip4-lookup", + .sibling_of = "mpls-load-balance", .format_buffer = format_mpls_header, .format_trace = format_mpls_lookup_trace, @@ -574,6 +598,11 @@ mpls_load_balance (vlib_main_t * vm, { hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0); } + dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1)); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) { @@ -585,10 +614,12 @@ mpls_load_balance (vlib_main_t * vm, { hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1); } + dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1)); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); } - - dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1)); - dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; next1 = dpo1->dpoi_next_node; @@ -650,9 +681,12 @@ mpls_load_balance (vlib_main_t * vm, { hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0); } + dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1)); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); } - - dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1)); next0 = dpo0->dpoi_next_node; vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; @@ -676,9 +710,13 @@ VLIB_REGISTER_NODE (mpls_load_balance_node) = { .function = mpls_load_balance, .name = "mpls-load-balance", .vector_size = sizeof (u32), - .sibling_of = "mpls-lookup", - .format_trace = format_mpls_load_balance_trace, + .n_next_nodes = 1, + .next_nodes = + { + [0] = "mpls-drop", + }, + }; VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance) diff --git a/test/test_mpls.py b/test/test_mpls.py index 700b7091..0ad1ee69 100644 --- a/test/test_mpls.py +++ b/test/test_mpls.py @@ -1054,5 +1054,364 @@ class TestMPLSDisabled(VppTestCase): self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled") +class TestMPLSPIC(VppTestCase): + """ MPLS PIC edge convergence """ + + def setUp(self): + super(TestMPLSPIC, self).setUp() + + # create 2 pg interfaces + self.create_pg_interfaces(range(4)) + + # core links + self.pg0.admin_up() + self.pg0.config_ip4() + self.pg0.resolve_arp() + self.pg0.enable_mpls() + self.pg1.admin_up() + self.pg1.config_ip4() + self.pg1.resolve_arp() + self.pg1.enable_mpls() + + # VRF (customer facing) link + self.pg2.admin_up() + self.pg2.set_table_ip4(1) + self.pg2.config_ip4() + self.pg2.resolve_arp() + self.pg2.set_table_ip6(1) + self.pg2.config_ip6() + self.pg2.resolve_ndp() + self.pg3.admin_up() + self.pg3.set_table_ip4(1) + self.pg3.config_ip4() + self.pg3.resolve_arp() + self.pg3.set_table_ip6(1) + self.pg3.config_ip6() + self.pg3.resolve_ndp() + + def tearDown(self): + super(TestMPLSPIC, self).tearDown() + self.pg0.disable_mpls() + for i in self.pg_interfaces: + i.unconfig_ip4() + i.unconfig_ip6() + i.set_table_ip4(0) + i.set_table_ip6(0) + i.admin_down() + + def test_mpls_ibgp_pic(self): + """ MPLS iBGP PIC edge convergence + + 1) setup many iBGP VPN routes via a pair of iBGP peers. + 2) Check EMCP forwarding to these peers + 3) withdraw the IGP route to one of these peers. 
+ 4) check forwarding continues to the remaining peer + """ + + # + # IGP+LDP core routes + # + core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32, + [VppRoutePath(self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[45])]) + core_10_0_0_45.add_vpp_config() + + core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32, + [VppRoutePath(self.pg1.remote_ip4, + self.pg1.sw_if_index, + labels=[46])]) + core_10_0_0_46.add_vpp_config() + + # + # Lot's of VPN routes. We need more the 64 so VPP will build + # the fast convergence indirection + # + vpn_routes = [] + pkts = [] + for ii in range(64): + dst = "192.168.1.%d" % ii + vpn_routes.append(VppIpRoute(self, dst, 32, + [VppRoutePath("10.0.0.45", + 0xffffffff, + labels=[145], + is_resolve_host=1), + VppRoutePath("10.0.0.46", + 0xffffffff, + labels=[146], + is_resolve_host=1)], + table_id=1)) + vpn_routes[ii].add_vpp_config() + + pkts.append(Ether(dst=self.pg2.local_mac, + src=self.pg2.remote_mac) / + IP(src=self.pg2.remote_ip4, dst=dst) / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + # + # Send the packet stream (one pkt to each VPN route) + # - expect a 50-50 split of the traffic + # + self.pg2.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0._get_capture(1) + rx1 = self.pg1._get_capture(1) + + # not testig the LB hashing algorithm so we're not concerned + # with the split ratio, just as long as neither is 0 + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + # + # use a test CLI command to stop the FIB walk process, this + # will prevent the FIB converging the VPN routes and thus allow + # us to probe the interim (psot-fail, pre-converge) state + # + self.vapi.ppcli("test fib-walk-process disable") + + # + # Withdraw one of the IGP routes + # + core_10_0_0_46.remove_vpp_config() + + # + # now all packets should be forwarded through the remaining peer + # + self.vapi.ppcli("clear trace") + self.pg2.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0.get_capture(len(pkts)) + + # + # enable the FIB walk process to converge the FIB + # + self.vapi.ppcli("test fib-walk-process enable") + + # + # packets should still be forwarded through the remaining peer + # + self.pg2.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0.get_capture(64) + + # + # Add the IGP route back and we return to load-balancing + # + core_10_0_0_46.add_vpp_config() + + self.pg2.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0._get_capture(1) + rx1 = self.pg1._get_capture(1) + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + def test_mpls_ebgp_pic(self): + """ MPLS eBGP PIC edge convergence + + 1) setup many eBGP VPN routes via a pair of eBGP peers + 2) Check EMCP forwarding to these peers + 3) withdraw one eBGP path - expect LB across remaining eBGP + """ + + # + # Lot's of VPN routes. 
We need more the 64 so VPP will build + # the fast convergence indirection + # + vpn_routes = [] + vpn_bindings = [] + pkts = [] + for ii in range(64): + dst = "192.168.1.%d" % ii + local_label = 1600 + ii + vpn_routes.append(VppIpRoute(self, dst, 32, + [VppRoutePath(self.pg2.remote_ip4, + 0xffffffff, + nh_table_id=1, + is_resolve_attached=1), + VppRoutePath(self.pg3.remote_ip4, + 0xffffffff, + nh_table_id=1, + is_resolve_attached=1)], + table_id=1)) + vpn_routes[ii].add_vpp_config() + + vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32, + ip_table_id=1)) + vpn_bindings[ii].add_vpp_config() + + pkts.append(Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=local_label, ttl=64) / + IP(src=self.pg0.remote_ip4, dst=dst) / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg2._get_capture(1) + rx1 = self.pg3._get_capture(1) + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + # + # use a test CLI command to stop the FIB walk process, this + # will prevent the FIB converging the VPN routes and thus allow + # us to probe the interim (psot-fail, pre-converge) state + # + self.vapi.ppcli("test fib-walk-process disable") + + # + # withdraw the connected prefix on the interface. + # + self.pg2.unconfig_ip4() + + # + # now all packets should be forwarded through the remaining peer + # + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg3.get_capture(len(pkts)) + + # + # enable the FIB walk process to converge the FIB + # + self.vapi.ppcli("test fib-walk-process enable") + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg3.get_capture(len(pkts)) + + # + # put the connecteds back + # + self.pg2.config_ip4() + + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg2._get_capture(1) + rx1 = self.pg3._get_capture(1) + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + def test_mpls_v6_ebgp_pic(self): + """ MPLSv6 eBGP PIC edge convergence + + 1) setup many eBGP VPNv6 routes via a pair of eBGP peers + 2) Check EMCP forwarding to these peers + 3) withdraw one eBGP path - expect LB across remaining eBGP + """ + + # + # Lot's of VPN routes. 
We need more the 64 so VPP will build + # the fast convergence indirection + # + vpn_routes = [] + vpn_bindings = [] + pkts = [] + for ii in range(64): + dst = "3000::%d" % ii + local_label = 1600 + ii + vpn_routes.append(VppIpRoute(self, dst, 128, + [VppRoutePath(self.pg2.remote_ip6, + 0xffffffff, + nh_table_id=1, + is_resolve_attached=1, + is_ip6=1), + VppRoutePath(self.pg3.remote_ip6, + 0xffffffff, + nh_table_id=1, + is_ip6=1, + is_resolve_attached=1)], + table_id=1, + is_ip6=1)) + vpn_routes[ii].add_vpp_config() + + vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128, + ip_table_id=1, + is_ip6=1)) + vpn_bindings[ii].add_vpp_config() + + pkts.append(Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=local_label, ttl=64) / + IPv6(src=self.pg0.remote_ip6, dst=dst) / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg2._get_capture(1) + rx1 = self.pg3._get_capture(1) + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + # + # use a test CLI command to stop the FIB walk process, this + # will prevent the FIB converging the VPN routes and thus allow + # us to probe the interim (psot-fail, pre-converge) state + # + self.vapi.ppcli("test fib-walk-process disable") + + # + # withdraw the connected prefix on the interface. + # and shutdown the interface so the ND cache is flushed. + # + self.pg2.unconfig_ip6() + self.pg2.admin_down() + + # + # now all packets should be forwarded through the remaining peer + # + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg3.get_capture(len(pkts)) + + # + # enable the FIB walk process to converge the FIB + # + self.vapi.ppcli("test fib-walk-process enable") + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg3.get_capture(len(pkts)) + + # + # put the connecteds back + # + self.pg2.admin_up() + self.pg2.config_ip6() + + self.pg0.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg2._get_capture(1) + rx1 = self.pg3._get_capture(1) + self.assertNotEqual(0, len(rx0)) + self.assertNotEqual(0, len(rx1)) + + if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py index d6146f28..b68e2105 100644 --- a/test/vpp_ip_route.py +++ b/test/vpp_ip_route.py @@ -57,7 +57,9 @@ class VppRoutePath(object): nh_via_label=MPLS_LABEL_INVALID, is_ip6=0, rpf_id=0, - is_interface_rx=0): + is_interface_rx=0, + is_resolve_host=0, + is_resolve_attached=0): self.nh_itf = nh_sw_if_index self.nh_table_id = nh_table_id self.nh_via_label = nh_via_label @@ -68,6 +70,8 @@ class VppRoutePath(object): self.nh_addr = inet_pton(AF_INET6, nh_addr) else: self.nh_addr = inet_pton(AF_INET, nh_addr) + self.is_resolve_host = is_resolve_host + self.is_resolve_attached = is_resolve_attached self.is_interface_rx = is_interface_rx self.is_rpf_id = 0 if rpf_id != 0: @@ -136,7 +140,11 @@ class VppIpRoute(VppObject): next_hop_n_out_labels=len( path.nh_labels), next_hop_via_label=path.nh_via_label, - is_ipv6=self.is_ip6) + next_hop_table_id=path.nh_table_id, + is_ipv6=self.is_ip6, + is_resolve_host=path.is_resolve_host, + is_resolve_attached=path.is_resolve_attached, + is_multipath=1 if len(self.paths) > 1 else 0) self._test.registry.register(self, self._test.logger) def remove_vpp_config(self): @@ -154,13 +162,16 @@ class 
VppIpRoute(VppObject): is_ipv6=self.is_ip6) else: for path in self.paths: - self._test.vapi.ip_add_del_route(self.dest_addr, - self.dest_addr_len, - path.nh_addr, - path.nh_itf, - table_id=self.table_id, - is_add=0, - is_ipv6=self.is_ip6) + self._test.vapi.ip_add_del_route( + self.dest_addr, + self.dest_addr_len, + path.nh_addr, + path.nh_itf, + table_id=self.table_id, + next_hop_table_id=path.nh_table_id, + next_hop_via_label=path.nh_via_label, + is_add=0, + is_ipv6=self.is_ip6) def query_vpp_config(self): return find_route(self._test, @@ -318,33 +329,41 @@ class VppMplsIpBind(VppObject): """ def __init__(self, test, local_label, dest_addr, dest_addr_len, - table_id=0, ip_table_id=0): + table_id=0, ip_table_id=0, is_ip6=0): self._test = test - self.dest_addr = inet_pton(AF_INET, dest_addr) self.dest_addr_len = dest_addr_len + self.dest_addr = dest_addr self.local_label = local_label self.table_id = table_id self.ip_table_id = ip_table_id + self.is_ip6 = is_ip6 + if is_ip6: + self.dest_addrn = inet_pton(AF_INET6, dest_addr) + else: + self.dest_addrn = inet_pton(AF_INET, dest_addr) def add_vpp_config(self): self._test.vapi.mpls_ip_bind_unbind(self.local_label, - self.dest_addr, + self.dest_addrn, self.dest_addr_len, table_id=self.table_id, - ip_table_id=self.ip_table_id) + ip_table_id=self.ip_table_id, + is_ip4=(self.is_ip6 == 0)) self._test.registry.register(self, self._test.logger) def remove_vpp_config(self): self._test.vapi.mpls_ip_bind_unbind(self.local_label, - self.dest_addr, + self.dest_addrn, self.dest_addr_len, - is_bind=0) + table_id=self.table_id, + ip_table_id=self.ip_table_id, + is_bind=0, + is_ip4=(self.is_ip6 == 0)) def query_vpp_config(self): dump = self._test.vapi.mpls_fib_dump() for e in dump: if self.local_label == e.label \ - and self.eos_bit == e.eos_bit \ and self.table_id == e.table_id: return True return False @@ -357,7 +376,7 @@ class VppMplsIpBind(VppObject): % (self.table_id, self.local_label, self.ip_table_id, - inet_ntop(AF_INET, self.dest_addr), + self.dest_addr, self.dest_addr_len)) -- cgit 1.2.3-korg From 08b1648194e0076220c52aebe27ee1eb5615fd6a Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Sat, 13 May 2017 05:52:58 -0700 Subject: Fix FIB recursion loops via cover (VPP-842) Change-Id: Ia91c3e8cb27b9e4c1cccefc0a4857dd9995450ab Signed-off-by: Neale Ranns --- src/vnet/fib/fib_entry_src_rr.c | 78 +++++++++++++++++++++++------------------ src/vnet/fib/fib_path.c | 11 ++++-- src/vnet/fib/fib_test.c | 30 ++++++++++++++++ 3 files changed, 81 insertions(+), 38 deletions(-) (limited to 'src/vnet/fib/fib_entry_src_rr.c') diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c index c145aaa2..d66ef7b1 100644 --- a/src/vnet/fib/fib_entry_src_rr.c +++ b/src/vnet/fib/fib_entry_src_rr.c @@ -69,6 +69,47 @@ fib_entry_src_rr_init (fib_entry_src_t *src) src->rr.fesr_sibling = FIB_NODE_INDEX_INVALID; } + +/* + * use the path-list of the cover, unless it would form a loop. + * that is unless the cover is via this entry. + * If a loop were to form it would be a 1 level loop (i.e. X via X), + * and there would be 2 locks on the path-list; one since its used + * by the cover, and 1 from here. The first lock will go when the + * cover is removed, the second, and last, when the covered walk + * occurs during the cover's removel - this is not a place where + * we can handle last lock gone. + * In short, don't let the loop form. 
The usual rules of 'we must + * let it form so we know when it breaks' don't apply here, since + * the loop will break when the cover changes, and this function + * will be called again when that happens. + */ +static void +fib_entry_src_rr_use_covers_pl (fib_entry_src_t *src, + const fib_entry_t *fib_entry, + const fib_entry_t *cover) +{ + fib_node_index_t *entries = NULL; + fib_protocol_t proto; + + proto = fib_entry->fe_prefix.fp_proto; + vec_add1(entries, fib_entry_get_index(fib_entry)); + + if (fib_path_list_recursive_loop_detect(cover->fe_parent, + &entries)) + { + src->fes_pl = fib_path_list_create_special( + proto, + FIB_PATH_LIST_FLAG_DROP, + drop_dpo_get(fib_proto_to_dpo(proto))); + } + else + { + src->fes_pl = cover->fe_parent; + } + vec_free(entries); +} + /* * Source activation. Called when the source is the new best source on the entry */ @@ -112,40 +153,7 @@ fib_entry_src_rr_activate (fib_entry_src_t *src, } else { - /* - * use the path-list of the cover, unless it would form a loop. - * that is unless the cover is via this entry. - * If a loop were to form it would be a 1 level loop (i.e. X via X), - * and there would be 2 locks on the path-list; one since its used - * by the cover, and 1 from here. The first lock will go when the - * cover is removed, the second, and last, when the covered walk - * occurs during the cover's removel - this is not a place where - * we can handle last lock gone. - * In short, don't let the loop form. The usual rules of 'we must - * let it form so we know when it breaks' don't apply here, since - * the loop will break when the cover changes, and this function - * will be called again when that happens. - */ - fib_node_index_t *entries = NULL; - fib_protocol_t proto; - - proto = fib_entry->fe_prefix.fp_proto; - vec_add1(entries, fib_entry_get_index(fib_entry)); - - if (fib_path_list_recursive_loop_detect(cover->fe_parent, - &entries)) - { - src->fes_pl = fib_path_list_create_special( - proto, - FIB_PATH_LIST_FLAG_DROP, - drop_dpo_get(fib_proto_to_dpo(proto))); - } - else - { - src->fes_pl = cover->fe_parent; - } - vec_free(entries); - + fib_entry_src_rr_use_covers_pl(src, fib_entry, cover); } fib_path_list_lock(src->fes_pl); @@ -256,7 +264,7 @@ fib_entry_src_rr_cover_update (fib_entry_src_t *src, } else { - src->fes_pl = cover->fe_parent; + fib_entry_src_rr_use_covers_pl(src, fib_entry, cover); } fib_path_list_lock(src->fes_pl); fib_path_list_unlock(old_path_list); diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 889317fd..b47d51f8 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -1716,7 +1716,11 @@ fib_path_get_resolving_interface (fib_node_index_t path_index) case FIB_PATH_TYPE_RECEIVE: return (path->receive.fp_interface); case FIB_PATH_TYPE_RECURSIVE: - return (fib_entry_get_resolving_interface(path->fp_via_fib)); + if (fib_path_is_resolved(path_index)) + { + return (fib_entry_get_resolving_interface(path->fp_via_fib)); + } + break; case FIB_PATH_TYPE_INTF_RX: case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_DEAG: @@ -1781,11 +1785,12 @@ fib_path_contribute_urpf (fib_node_index_t path_index, break; case FIB_PATH_TYPE_RECURSIVE: - if (FIB_NODE_INDEX_INVALID != path->fp_via_fib) + if (FIB_NODE_INDEX_INVALID != path->fp_via_fib && + !fib_path_is_looped(path_index)) { /* * there's unresolved due to constraints, and there's unresolved - * due to ain't go no via. can't do nowt w'out via. + * due to ain't got no via. can't do nowt w'out via. 
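 * (the fib_path_is_looped() check added above means a looped recursive path
 * likewise contributes nothing here)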
*/ fib_entry_contribute_urpf(path->fp_via_fib, urpf); } diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index ddea6b86..486e5616 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -2976,6 +2976,36 @@ fib_test_v4 (void) FIB_TEST((ENBR+7 == fib_entry_pool_size()), "entry pool size is %d", fib_entry_pool_size()); + /* + * Make the default route recursive via a unknown next-hop. Thus the + * next hop's cover would be the default route + */ + fei = fib_table_entry_path_add(fib_index, + &pfx_0_0_0_0_s_0, + FIB_SOURCE_API, + FIB_ENTRY_FLAG_NONE, + FIB_PROTOCOL_IP4, + &pfx_23_23_23_23_s_32.fp_addr, + ~0, // recursive + fib_index, + 1, + NULL, + FIB_ROUTE_PATH_FLAG_NONE); + dpo = fib_entry_contribute_ip_forwarding(fei); + FIB_TEST(load_balance_is_drop(dpo), + "0.0.0.0.0/0 via is DROP"); + FIB_TEST((fib_entry_get_resolving_interface(fei) == ~0), + "no resolving interface for looped 0.0.0.0/0"); + + fei = fib_table_lookup_exact_match(fib_index, &pfx_23_23_23_23_s_32); + dpo = fib_entry_contribute_ip_forwarding(fei); + FIB_TEST(load_balance_is_drop(dpo), + "23.23.23.23/32 via is DROP"); + FIB_TEST((fib_entry_get_resolving_interface(fei) == ~0), + "no resolving interface for looped 23.23.23.23/32"); + + fib_table_entry_delete(fib_index, &pfx_0_0_0_0_s_0, FIB_SOURCE_API); + /* * A recursive route with recursion constraints. * 200.200.200.200/32 via 1.1.1.1 is recurse via host constrained -- cgit 1.2.3-korg From da78f957e46c686434149d332a477d7ea055d76a Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 24 May 2017 09:15:43 -0700 Subject: L2 over MPLS [support for VPWS/VPLS] - switch to using dpo_proto_t rather than fib_protocol_t in fib_paths so that we can describe L2 paths - VLIB nodes to handle pop/push of MPLS labels to L2 Change-Id: Id050d06a11fd2c9c1c81ce5a0654e6c5ae6afa6e Signed-off-by: Neale Ranns --- src/plugins/gtpu/gtpu.c | 2 +- src/plugins/snat/snat.c | 2 +- src/vat/api_format.c | 17 +- src/vnet/dhcp/client.c | 6 +- src/vnet/dhcp/dhcp6_proxy_node.c | 2 +- src/vnet/dpo/dpo.c | 19 ++ src/vnet/dpo/dpo.h | 11 +- src/vnet/dpo/interface_dpo.c | 30 +++ src/vnet/dpo/mpls_label_dpo.c | 45 +++- src/vnet/ethernet/arp.c | 4 +- src/vnet/fib/fib_api.h | 4 +- src/vnet/fib/fib_entry.c | 8 +- src/vnet/fib/fib_entry_src.c | 16 +- src/vnet/fib/fib_entry_src.h | 4 +- src/vnet/fib/fib_entry_src_api.c | 2 +- src/vnet/fib/fib_entry_src_default_route.c | 2 +- src/vnet/fib/fib_entry_src_interface.c | 2 +- src/vnet/fib/fib_entry_src_lisp.c | 8 +- src/vnet/fib/fib_entry_src_mpls.c | 4 +- src/vnet/fib/fib_entry_src_rr.c | 15 +- src/vnet/fib/fib_entry_src_special.c | 2 +- src/vnet/fib/fib_path.c | 79 +++---- src/vnet/fib/fib_path.h | 11 +- src/vnet/fib/fib_path_ext.c | 3 + src/vnet/fib/fib_path_list.c | 4 +- src/vnet/fib/fib_path_list.h | 4 +- src/vnet/fib/fib_table.c | 6 +- src/vnet/fib/fib_table.h | 6 +- src/vnet/fib/fib_test.c | 338 ++++++++++++++--------------- src/vnet/fib/fib_types.h | 8 +- src/vnet/interface_format.c | 12 +- src/vnet/ip/ip4_forward.c | 6 +- src/vnet/ip/ip6_forward.c | 4 +- src/vnet/ip/ip6_neighbor.c | 10 +- src/vnet/ip/ip_api.c | 38 ++-- src/vnet/ip/lookup.c | 18 +- src/vnet/lisp-gpe/lisp_gpe.c | 13 +- src/vnet/lisp-gpe/lisp_gpe_api.c | 10 +- src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c | 5 +- src/vnet/mfib/ip6_mfib.c | 6 +- src/vnet/mfib/mfib_entry.c | 10 +- src/vnet/mfib/mfib_test.c | 14 +- src/vnet/mpls/mpls.api | 4 +- src/vnet/mpls/mpls.c | 36 +-- src/vnet/mpls/mpls_api.c | 21 +- src/vnet/mpls/mpls_tunnel.c | 75 ++++--- src/vnet/mpls/mpls_tunnel.h | 38 ++-- 
src/vnet/srmpls/sr_mpls_policy.c | 6 +- src/vnet/srmpls/sr_mpls_steering.c | 2 +- src/vnet/srv6/sr_steering.c | 4 +- src/vnet/vxlan-gpe/vxlan_gpe.c | 2 +- src/vnet/vxlan/vxlan.c | 2 +- src/vpp/app/vpe_cli.c | 2 +- test/test_bfd.py | 6 +- test/test_gre.py | 24 +- test/test_ip6.py | 26 +-- test/test_map.py | 19 +- test/test_mpls.py | 318 ++++++++++++++++++++++----- test/test_p2p_ethernet.py | 16 +- test/vpp_ip_route.py | 24 +- test/vpp_mpls_tunnel_interface.py | 6 +- test/vpp_papi_provider.py | 4 +- 62 files changed, 889 insertions(+), 556 deletions(-) (limited to 'src/vnet/fib/fib_entry_src_rr.c') diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c index 84745bd8..3dfb4210 100755 --- a/src/plugins/gtpu/gtpu.c +++ b/src/plugins/gtpu/gtpu.c @@ -534,7 +534,7 @@ int vnet_gtpu_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo (fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/plugins/snat/snat.c b/src/plugins/snat/snat.c index 9fbc1e54..f196b5c2 100644 --- a/src/plugins/snat/snat.c +++ b/src/plugins/snat/snat.c @@ -135,7 +135,7 @@ snat_add_del_addr_to_fib (ip4_address_t * addr, u8 p_len, u32 sw_if_index, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL | FIB_ENTRY_FLAG_EXCLUSIVE), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, sw_if_index, ~0, diff --git a/src/vat/api_format.c b/src/vat/api_format.c index f97cdeef..009cf173 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -7498,7 +7498,7 @@ api_mpls_route_add_del (vat_main_t * vam) mpls_label_t *next_hop_out_label_stack = NULL; mpls_label_t local_label = MPLS_LABEL_INVALID; u8 is_eos = 0; - u8 next_hop_proto_is_ip4 = 1; + dpo_proto_t next_hop_proto = DPO_PROTO_IP4; /* Parse args required to build the message */ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) @@ -7517,13 +7517,13 @@ api_mpls_route_add_del (vat_main_t * vam) &v4_next_hop_address)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 1; + next_hop_proto = DPO_PROTO_IP4; } else if (unformat (i, "via %U", unformat_ip6_address, &v6_next_hop_address)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 0; + next_hop_proto = DPO_PROTO_IP6; } else if (unformat (i, "weight %d", &next_hop_weight)) ; @@ -7548,12 +7548,12 @@ api_mpls_route_add_del (vat_main_t * vam) else if (unformat (i, "lookup-in-ip4-table %d", &next_hop_table_id)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 1; + next_hop_proto = DPO_PROTO_IP4; } else if (unformat (i, "lookup-in-ip6-table %d", &next_hop_table_id)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 0; + next_hop_proto = DPO_PROTO_IP6; } else if (unformat (i, "next-hop-table %d", &next_hop_table_id)) ; @@ -7599,7 +7599,7 @@ api_mpls_route_add_del (vat_main_t * vam) mp->mr_create_table_if_needed = create_table_if_needed; mp->mr_is_add = is_add; - mp->mr_next_hop_proto_is_ip4 = next_hop_proto_is_ip4; + mp->mr_next_hop_proto = next_hop_proto; mp->mr_is_classify = is_classify; mp->mr_is_multipath = is_multipath; mp->mr_is_resolve_host = resolve_host; @@ -7622,13 +7622,14 @@ api_mpls_route_add_del (vat_main_t * vam) if (next_hop_set) { - if (next_hop_proto_is_ip4) + if (DPO_PROTO_IP4 == next_hop_proto) { clib_memcpy (mp->mr_next_hop, &v4_next_hop_address, sizeof (v4_next_hop_address)); } - else + else if (DPO_PROTO_IP6 == next_hop_proto) + { clib_memcpy (mp->mr_next_hop, &v6_next_hop_address, diff --git a/src/vnet/dhcp/client.c b/src/vnet/dhcp/client.c index cfe62a6f..dd5e99f2 100644 --- a/src/vnet/dhcp/client.c +++ 
b/src/vnet/dhcp/client.c @@ -296,7 +296,7 @@ int dhcp_client_for_us (u32 bi, vlib_buffer_t * b, &all_0s, FIB_SOURCE_DHCP, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, @@ -605,7 +605,7 @@ dhcp_bound_state (dhcp_client_main_t * dcm, dhcp_client_t * c, f64 now) c->sw_if_index), &all_0s, FIB_SOURCE_DHCP, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, @@ -900,7 +900,7 @@ int dhcp_client_add_del (dhcp_client_add_del_args_t * a) c->sw_if_index), &all_0s, FIB_SOURCE_DHCP, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, diff --git a/src/vnet/dhcp/dhcp6_proxy_node.c b/src/vnet/dhcp/dhcp6_proxy_node.c index e109cc4c..9c2f5220 100644 --- a/src/vnet/dhcp/dhcp6_proxy_node.c +++ b/src/vnet/dhcp/dhcp6_proxy_node.c @@ -857,7 +857,7 @@ dhcp6_proxy_set_server (ip46_address_t *addr, else { const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vnet/dpo/dpo.c b/src/vnet/dpo/dpo.c index 389f995b..aa770838 100644 --- a/src/vnet/dpo/dpo.c +++ b/src/vnet/dpo/dpo.c @@ -109,6 +109,25 @@ vnet_link_to_dpo_proto (vnet_link_t linkt) return (0); } +vnet_link_t +dpo_proto_to_link (dpo_proto_t dp) +{ + switch (dp) + { + case DPO_PROTO_IP6: + return (VNET_LINK_IP6); + case DPO_PROTO_IP4: + return (VNET_LINK_IP4); + case DPO_PROTO_MPLS: + return (VNET_LINK_MPLS); + case DPO_PROTO_ETHERNET: + return (VNET_LINK_ETHERNET); + case DPO_PROTO_NSH: + return (VNET_LINK_NSH); + } + return (~0); +} + u8 * format_dpo_type (u8 * s, va_list * args) { diff --git a/src/vnet/dpo/dpo.h b/src/vnet/dpo/dpo.h index 5aa4e2d2..42fc51d4 100644 --- a/src/vnet/dpo/dpo.h +++ b/src/vnet/dpo/dpo.h @@ -59,14 +59,10 @@ typedef u32 index_t; */ typedef enum dpo_proto_t_ { -#if CLIB_DEBUG > 0 - DPO_PROTO_IP4 = 1, -#else DPO_PROTO_IP4 = 0, -#endif DPO_PROTO_IP6, - DPO_PROTO_ETHERNET, DPO_PROTO_MPLS, + DPO_PROTO_ETHERNET, DPO_PROTO_NSH, } __attribute__((packed)) dpo_proto_t; @@ -272,6 +268,11 @@ extern u8 *format_dpo_type(u8 * s, va_list * args); */ extern u8 *format_dpo_proto(u8 * s, va_list * args); +/** + * @brief format a DPO protocol + */ +extern vnet_link_t dpo_proto_to_link(dpo_proto_t dp); + /** * @brief * Set and stack a DPO. 
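The dpo.c/dpo.h hunks above add dpo_proto_to_link(), the inverse of the existing vnet_link_to_dpo_proto(), so that code which now carries a dpo_proto_t instead of a fib_protocol_t can still derive the link type it needs for adjacency creation. Below is a minimal sketch of the intended call pattern, mirroring the fib_path.c hunks later in this patch; the function name and local parameters are illustrative placeholders only, and the includes assume the usual VPP headers:

#include <vnet/dpo/dpo.h>
#include <vnet/fib/fib_types.h>
#include <vnet/adj/adj.h>

/*
 * Illustrative only: resolve an attached next-hop when the path stores a
 * dpo_proto_t (as fib_path_t.fp_nh_proto now does in this series).
 */
static adj_index_t
example_attached_nh_adj (dpo_proto_t nh_proto,
                         const ip46_address_t *nh_addr,
                         u32 sw_if_index)
{
  /* DPO_PROTO_IP4 -> VNET_LINK_IP4, DPO_PROTO_MPLS -> VNET_LINK_MPLS, ... */
  vnet_link_t linkt = dpo_proto_to_link (nh_proto);

  /* the adjacency DB is still keyed on fib_protocol_t, hence dpo_proto_to_fib() */
  return (adj_nbr_add_or_lock (dpo_proto_to_fib (nh_proto),
                               linkt, nh_addr, sw_if_index));
}
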
diff --git a/src/vnet/dpo/interface_dpo.c b/src/vnet/dpo/interface_dpo.c index 8d700c23..780bfa2a 100644 --- a/src/vnet/dpo/interface_dpo.c +++ b/src/vnet/dpo/interface_dpo.c @@ -195,11 +195,17 @@ const static char* const interface_dpo_ip6_nodes[] = "interface-dpo-ip4", NULL, }; +const static char* const interface_dpo_l2_nodes[] = +{ + "interface-dpo-l2", + NULL, +}; const static char* const * const interface_dpo_nodes[DPO_PROTO_NUM] = { [DPO_PROTO_IP4] = interface_dpo_ip4_nodes, [DPO_PROTO_IP6] = interface_dpo_ip6_nodes, + [DPO_PROTO_ETHERNET] = interface_dpo_l2_nodes, [DPO_PROTO_MPLS] = NULL, }; @@ -382,6 +388,14 @@ interface_dpo_ip6 (vlib_main_t * vm, return (interface_dpo_inline(vm, node, from_frame)); } +static uword +interface_dpo_l2 (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return (interface_dpo_inline(vm, node, from_frame)); +} + VLIB_REGISTER_NODE (interface_dpo_ip4_node) = { .function = interface_dpo_ip4, .name = "interface-dpo-ip4", @@ -414,3 +428,19 @@ VLIB_REGISTER_NODE (interface_dpo_ip6_node) = { VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_ip6_node, interface_dpo_ip6) +VLIB_REGISTER_NODE (interface_dpo_l2_node) = { + .function = interface_dpo_l2, + .name = "interface-dpo-l2", + .vector_size = sizeof (u32), + .format_trace = format_interface_dpo_trace, + + .n_next_nodes = 2, + .next_nodes = { + [INTERFACE_DPO_DROP] = "error-drop", + [INTERFACE_DPO_INPUT] = "l2-input", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_l2_node, + interface_dpo_l2) + diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c index 1c451a51..b178a902 100644 --- a/src/vnet/dpo/mpls_label_dpo.c +++ b/src/vnet/dpo/mpls_label_dpo.c @@ -192,7 +192,8 @@ mpls_label_imposition_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 payload_is_ip4, - u8 payload_is_ip6) + u8 payload_is_ip6, + u8 payload_is_ethernet) { u32 n_left_from, next_index, * from, * to_next; @@ -320,6 +321,13 @@ mpls_label_imposition_inline (vlib_main_t * vm, ttl2 = ip2->hop_limit; ttl3 = ip3->hop_limit; } + else if (payload_is_ethernet) + { + /* + * nothing to chang ein the ethernet header + */ + ttl0 = ttl1 = ttl2 = ttl3 = 255; + } else { /* @@ -551,7 +559,7 @@ mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 0, 0)); + return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0)); } VLIB_REGISTER_NODE (mpls_label_imposition_node) = { @@ -573,7 +581,7 @@ ip4_mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 1, 0)); + return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0)); } VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = { @@ -595,7 +603,7 @@ ip6_mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 0, 1)); + return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0)); } VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = { @@ -612,6 +620,28 @@ VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = { VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node, ip6_mpls_label_imposition) +static uword +ethernet_mpls_label_imposition (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1)); +} + +VLIB_REGISTER_NODE 
(ethernet_mpls_label_imposition_node) = { + .function = ethernet_mpls_label_imposition, + .name = "ethernet-mpls-label-imposition", + .vector_size = sizeof (u32), + + .format_trace = format_mpls_label_imposition_trace, + .n_next_nodes = 1, + .next_nodes = { + [0] = "error-drop", + } +}; +VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node, + ethernet_mpls_label_imposition) + static void mpls_label_dpo_mem_show (void) { @@ -643,11 +673,18 @@ const static char* const mpls_label_imp_mpls_nodes[] = "mpls-label-imposition", NULL, }; +const static char* const mpls_label_imp_ethernet_nodes[] = +{ + "ethernet-mpls-label-imposition", + NULL, +}; + const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] = { [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes, [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes, [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes, + [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes, }; diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c index 4d9edaf5..8a394006 100644 --- a/src/vnet/ethernet/arp.c +++ b/src/vnet/ethernet/arp.c @@ -588,7 +588,7 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm, e->fib_entry_index = fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, &pfx.fp_addr, + DPO_PROTO_IP4, &pfx.fp_addr, e->sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } @@ -1621,7 +1621,7 @@ arp_entry_free (ethernet_arp_interface_t * eai, ethernet_arp_ip4_entry_t * e) fib_table_entry_path_remove (fib_index, &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx.fp_addr, e->sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h index 73d76a42..d07d6cae 100644 --- a/src/vnet/fib/fib_api.h +++ b/src/vnet/fib/fib_api.h @@ -21,7 +21,7 @@ int add_del_route_check (fib_protocol_t table_proto, u32 table_id, u32 next_hop_sw_if_index, - fib_protocol_t next_hop_table_proto, + dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, u8 create_missing_tables, u8 is_rpf_id, @@ -43,7 +43,7 @@ add_del_route_t_handler (u8 is_multipath, u8 is_rpf_id, u32 fib_index, const fib_prefix_t * prefix, - u8 next_hop_proto_is_ip4, + dpo_proto_t next_hop_proto, const ip46_address_t * next_hop, u32 next_hop_sw_if_index, u8 next_hop_fib_index, diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c index d7ff1c8c..2027f2be 100644 --- a/src/vnet/fib/fib_entry.c +++ b/src/vnet/fib/fib_entry.c @@ -58,12 +58,18 @@ fib_entry_get_index (const fib_entry_t * fib_entry) return (fib_entry - fib_entry_pool); } -static fib_protocol_t +fib_protocol_t fib_entry_get_proto (const fib_entry_t * fib_entry) { return (fib_entry->fe_prefix.fp_proto); } +dpo_proto_t +fib_entry_get_dpo_proto (const fib_entry_t * fib_entry) +{ + return (fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto)); +} + fib_forward_chain_type_t fib_entry_get_default_chain_type (const fib_entry_t *fib_entry) { diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c index ff73cbf9..173df74f 100644 --- a/src/vnet/fib/fib_entry_src.c +++ b/src/vnet/fib/fib_entry_src.c @@ -29,12 +29,6 @@ */ static fib_entry_src_vft_t fib_entry_src_vft[FIB_SOURCE_MAX]; -static fib_protocol_t -fib_entry_get_proto (const fib_entry_t * fib_entry) -{ - return (fib_entry->fe_prefix.fp_proto); -} - void fib_entry_src_register (fib_source_t source, const fib_entry_src_vft_t *vft) @@ -861,7 +855,7 @@ fib_entry_src_action_add (fib_entry_t *fib_entry, fib_entry_src_vft[source].fesv_add(esrc, fib_entry, flags, - 
fib_entry_get_proto(fib_entry), + fib_entry_get_dpo_proto(fib_entry), dpo); } @@ -914,7 +908,7 @@ fib_entry_src_action_update (fib_entry_t *fib_entry, fib_entry_src_vft[source].fesv_add(esrc, fib_entry, flags, - fib_entry_get_proto(fib_entry), + fib_entry_get_dpo_proto(fib_entry), dpo); } @@ -1106,8 +1100,7 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, source, flags, drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); + fib_entry_get_dpo_proto(fib_entry))); esrc = fib_entry_src_find(fib_entry, source, NULL); } @@ -1166,8 +1159,7 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry, source, flags, drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); + fib_entry_get_dpo_proto(fib_entry))); esrc = fib_entry_src_find(fib_entry, source, NULL); } diff --git a/src/vnet/fib/fib_entry_src.h b/src/vnet/fib/fib_entry_src.h index 640c174d..35c43936 100644 --- a/src/vnet/fib/fib_entry_src.h +++ b/src/vnet/fib/fib_entry_src.h @@ -73,7 +73,7 @@ typedef void (*fib_entry_src_deactivate_t)(fib_entry_src_t *src, typedef void (*fib_entry_src_add_t)(fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo); /** @@ -277,6 +277,8 @@ extern void fib_entry_src_mk_lb (fib_entry_t *fib_entry, fib_forward_chain_type_t fct, dpo_id_t *dpo_lb); +extern fib_protocol_t fib_entry_get_proto(const fib_entry_t * fib_entry); +extern dpo_proto_t fib_entry_get_dpo_proto(const fib_entry_t * fib_entry); /* * Per-source registration. declared here so we save a separate .h file for each diff --git a/src/vnet/fib/fib_entry_src_api.c b/src/vnet/fib/fib_entry_src_api.c index f895886b..1cdcfbde 100644 --- a/src/vnet/fib/fib_entry_src_api.c +++ b/src/vnet/fib/fib_entry_src_api.c @@ -131,7 +131,7 @@ static void fib_entry_src_api_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { if (FIB_ENTRY_FLAG_NONE != flags) diff --git a/src/vnet/fib/fib_entry_src_default_route.c b/src/vnet/fib/fib_entry_src_default_route.c index 9f4e7c36..431abb66 100644 --- a/src/vnet/fib/fib_entry_src_default_route.c +++ b/src/vnet/fib/fib_entry_src_default_route.c @@ -35,7 +35,7 @@ static void fib_entry_src_default_route_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special(proto, diff --git a/src/vnet/fib/fib_entry_src_interface.c b/src/vnet/fib/fib_entry_src_interface.c index bb87818f..6c087f34 100644 --- a/src/vnet/fib/fib_entry_src_interface.c +++ b/src/vnet/fib/fib_entry_src_interface.c @@ -35,7 +35,7 @@ static void fib_entry_src_interface_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special( diff --git a/src/vnet/fib/fib_entry_src_lisp.c b/src/vnet/fib/fib_entry_src_lisp.c index 7f8b91bb..e72dce63 100644 --- a/src/vnet/fib/fib_entry_src_lisp.c +++ b/src/vnet/fib/fib_entry_src_lisp.c @@ -79,10 +79,10 @@ fib_entry_src_lisp_path_remove (fib_entry_src_t *src, static void fib_entry_src_lisp_add (fib_entry_src_t *src, - const fib_entry_t *entry, - fib_entry_flag_t flags, - fib_protocol_t proto, - const dpo_id_t *dpo) + const fib_entry_t *entry, + fib_entry_flag_t flags, + dpo_proto_t proto, + const dpo_id_t *dpo) { if (FIB_ENTRY_FLAG_NONE != flags) { diff --git 
a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c index 14c7310f..a616458f 100644 --- a/src/vnet/fib/fib_entry_src_mpls.c +++ b/src/vnet/fib/fib_entry_src_mpls.c @@ -57,13 +57,13 @@ static void fib_entry_src_mpls_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special(proto, FIB_PATH_LIST_FLAG_DROP, - drop_dpo_get(fib_proto_to_dpo(proto))); + drop_dpo_get(proto)); } static void diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c index d66ef7b1..1153f3f1 100644 --- a/src/vnet/fib/fib_entry_src_rr.c +++ b/src/vnet/fib/fib_entry_src_rr.c @@ -35,7 +35,7 @@ fib_entry_src_rr_resolve_via_connected (fib_entry_src_t *src, const fib_entry_t *cover) { const fib_route_path_t path = { - .frp_proto = fib_entry->fe_prefix.fp_proto, + .frp_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto), .frp_addr = fib_entry->fe_prefix.fp_addr, .frp_sw_if_index = fib_entry_get_resolving_interface( fib_entry_get_index(cover)), @@ -90,18 +90,17 @@ fib_entry_src_rr_use_covers_pl (fib_entry_src_t *src, const fib_entry_t *cover) { fib_node_index_t *entries = NULL; - fib_protocol_t proto; + dpo_proto_t proto; - proto = fib_entry->fe_prefix.fp_proto; + proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto); vec_add1(entries, fib_entry_get_index(fib_entry)); if (fib_path_list_recursive_loop_detect(cover->fe_parent, &entries)) { - src->fes_pl = fib_path_list_create_special( - proto, - FIB_PATH_LIST_FLAG_DROP, - drop_dpo_get(fib_proto_to_dpo(proto))); + src->fes_pl = fib_path_list_create_special(proto, + FIB_PATH_LIST_FLAG_DROP, + drop_dpo_get(proto)); } else { @@ -126,7 +125,7 @@ fib_entry_src_rr_activate (fib_entry_src_t *src, */ if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto) { - src->fes_pl = fib_path_list_create_special(FIB_PROTOCOL_MPLS, + src->fes_pl = fib_path_list_create_special(DPO_PROTO_MPLS, FIB_PATH_LIST_FLAG_DROP, NULL); fib_path_list_lock(src->fes_pl); diff --git a/src/vnet/fib/fib_entry_src_special.c b/src/vnet/fib/fib_entry_src_special.c index 75605d7f..e979e18f 100644 --- a/src/vnet/fib/fib_entry_src_special.c +++ b/src/vnet/fib/fib_entry_src_special.c @@ -43,7 +43,7 @@ static void fib_entry_src_special_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 3a67a544..58050ccb 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -193,7 +193,7 @@ typedef struct fib_path_t_ { * next-hop's address. 
We can't derive this from the address itself * since the address can be all zeros */ - fib_protocol_t fp_nh_proto; + dpo_proto_t fp_nh_proto; /** * UCMP [unnormalised] weigth @@ -381,7 +381,7 @@ format_fib_path (u8 * s, va_list * args) s = format (s, " index:%d ", fib_path_get_index(path)); s = format (s, "pl-index:%d ", path->fp_pl_index); - s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto); + s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto); s = format (s, "weight=%d ", path->fp_weight); s = format (s, "pref=%d ", path->fp_preference); s = format (s, "%s: ", fib_path_type_names[path->fp_type]); @@ -454,7 +454,7 @@ format_fib_path (u8 * s, va_list * args) } break; case FIB_PATH_TYPE_RECURSIVE: - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { s = format (s, "via %U %U", format_mpls_unicast_label, @@ -552,14 +552,14 @@ fib_path_attached_next_hop_get_adj (fib_path_t *path, * the subnet address (the attached route) links to the * auto-adj (see below), we want that adj here too. */ - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &zero_addr, path->attached_next_hop.fp_interface)); } else { - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &path->attached_next_hop.fp_nh, path->attached_next_hop.fp_interface)); @@ -575,10 +575,10 @@ fib_path_attached_next_hop_set (fib_path_t *path) */ dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, fib_path_attached_next_hop_get_adj( path, - fib_proto_to_link(path->fp_nh_proto))); + dpo_proto_to_link(path->fp_nh_proto))); /* * become a child of the adjacency so we receive updates @@ -607,14 +607,14 @@ fib_path_attached_get_adj (fib_path_t *path, * point-2-point interfaces do not require a glean, since * there is nothing to ARP. Install a rewrite/nbr adj instead */ - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &zero_addr, path->attached.fp_interface)); } else { - return (adj_glean_add_or_lock(path->fp_nh_proto, + return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), path->attached.fp_interface, NULL)); } @@ -650,7 +650,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); } else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST) { @@ -668,7 +668,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. let the load-balance maps know @@ -685,7 +685,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib))) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. 
let the load-balance maps know @@ -699,7 +699,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (!fib_entry_is_resolved(path->fp_via_fib)) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. let the load-balance maps know @@ -720,9 +720,7 @@ fib_path_recursive_adj_update (fib_path_t *path, */ dpo_copy(dpo, &via_dpo); - FIB_PATH_DBG(path, "recursive update: %U", - fib_get_lookup_main(path->fp_nh_proto), - &path->fp_dpo, 2); + FIB_PATH_DBG(path, "recursive update:"); dpo_reset(&via_dpo); } @@ -804,13 +802,8 @@ fib_path_unresolve (fib_path_t *path) static fib_forward_chain_type_t fib_path_to_chain_type (const fib_path_t *path) { - switch (path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { - case FIB_PROTOCOL_IP4: - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); - case FIB_PROTOCOL_IP6: - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6); - case FIB_PROTOCOL_MPLS: if (FIB_PATH_TYPE_RECURSIVE == path->fp_type && MPLS_EOS == path->recursive.fp_nh.fp_eos) { @@ -821,7 +814,10 @@ fib_path_to_chain_type (const fib_path_t *path) return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS); } } - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); + else + { + return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto)); + } } /* @@ -927,7 +923,7 @@ FIXME comment ai = fib_path_attached_next_hop_get_adj( path, - fib_proto_to_link(path->fp_nh_proto)); + dpo_proto_to_link(path->fp_nh_proto)); path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; if (if_is_up && adj_is_up(ai)) @@ -935,9 +931,7 @@ FIXME comment path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED; } - dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), - ai); + dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai); adj_unlock(ai); if (!if_is_up) @@ -1141,7 +1135,7 @@ fib_path_create (fib_node_index_t pl_index, else { path->fp_type = FIB_PATH_TYPE_RECURSIVE; - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { path->recursive.fp_nh.fp_local_label = rpath->frp_local_label; path->recursive.fp_nh.fp_eos = rpath->frp_eos; @@ -1167,7 +1161,7 @@ fib_path_create (fib_node_index_t pl_index, */ fib_node_index_t fib_path_create_special (fib_node_index_t pl_index, - fib_protocol_t nh_proto, + dpo_proto_t nh_proto, fib_path_cfg_flags_t flags, const dpo_id_t *dpo) { @@ -1433,7 +1427,7 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index, res = (path->attached.fp_interface - rpath->frp_sw_if_index); break; case FIB_PATH_TYPE_RECURSIVE: - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label; @@ -1535,8 +1529,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index, FIB_PATH_DBG(path, "recursive loop formed"); path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP; - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); } else { @@ -1590,8 +1583,7 @@ fib_path_resolve (fib_node_index_t path_index) */ if (fib_path_is_permanent_drop(path)) { - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; return (fib_path_is_resolved(path_index)); } @@ -1612,9 +1604,9 @@ fib_path_resolve (fib_node_index_t path_index) } 
dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, fib_path_attached_get_adj(path, - fib_proto_to_link(path->fp_nh_proto))); + dpo_proto_to_link(path->fp_nh_proto))); /* * become a child of the adjacency so we receive updates @@ -1639,7 +1631,7 @@ fib_path_resolve (fib_node_index_t path_index) ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib); - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, path->recursive.fp_nh.fp_eos, @@ -1680,8 +1672,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via the drop */ - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); break; case FIB_PATH_TYPE_DEAG: { @@ -1696,7 +1687,7 @@ fib_path_resolve (fib_node_index_t path_index) LOOKUP_UNICAST); lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, cast, LOOKUP_INPUT_DST_ADDR, LOOKUP_TABLE_FROM_CONFIG, @@ -1707,7 +1698,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via a receive DPO. */ - receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto), + receive_dpo_add_or_lock(path->fp_nh_proto, path->receive.fp_interface, &path->receive.fp_addr, &path->fp_dpo); @@ -1716,7 +1707,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via a receive DPO. */ - interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto), + interface_dpo_add_or_lock(path->fp_nh_proto, path->intf_rx.fp_interface, &path->fp_dpo); break; @@ -2035,7 +2026,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, /* * Create the adj needed for sending IP multicast traffic */ - ai = adj_mcast_add_or_lock(path->fp_nh_proto, + ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), fib_forw_chain_type_to_link_type(fct), path->attached.fp_interface); dpo_set(dpo, DPO_ADJACENCY, @@ -2187,7 +2178,7 @@ fib_path_encode (fib_node_index_t path_list_index, return (FIB_PATH_LIST_WALK_CONTINUE); } -fib_protocol_t +dpo_proto_t fib_path_get_proto (fib_node_index_t path_index) { fib_path_t *path; diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h index a34cb43f..f986e437 100644 --- a/src/vnet/fib/fib_path.h +++ b/src/vnet/fib/fib_path.h @@ -78,6 +78,11 @@ typedef enum fib_path_cfg_attribute_t_ { * The path is an interface recieve */ FIB_PATH_CFG_ATTRIBUTE_LOCAL, + /** + * The path is L2. i.e. the parameters therein are to be interpreted as + * pertaining to L2 config. + */ + FIB_PATH_CFG_ATTRIBUTE_L2, /** * Marker. Add new types before this one, then update it. 
*/ @@ -98,6 +103,7 @@ typedef enum fib_path_cfg_attribute_t_ { [FIB_PATH_CFG_ATTRIBUTE_ATTACHED] = "attached", \ [FIB_PATH_CFG_ATTRIBUTE_INTF_RX] = "interface-rx", \ [FIB_PATH_CFG_ATTRIBUTE_RPF_ID] = "rpf-id", \ + [FIB_PATH_CFG_ATTRIBUTE_L2] = "l2", \ } #define FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(_item) \ @@ -118,6 +124,7 @@ typedef enum fib_path_cfg_flags_t_ { FIB_PATH_CFG_FLAG_ATTACHED = (1 << FIB_PATH_CFG_ATTRIBUTE_ATTACHED), FIB_PATH_CFG_FLAG_INTF_RX = (1 << FIB_PATH_CFG_ATTRIBUTE_INTF_RX), FIB_PATH_CFG_FLAG_RPF_ID = (1 << FIB_PATH_CFG_ATTRIBUTE_RPF_ID), + FIB_PATH_CFG_FLAG_L2 = (1 << FIB_PATH_CFG_ATTRIBUTE_L2), } __attribute__ ((packed)) fib_path_cfg_flags_t; @@ -131,7 +138,7 @@ extern u8 * format_fib_path(u8 * s, va_list * args); extern fib_node_index_t fib_path_create(fib_node_index_t pl_index, const fib_route_path_t *path); extern fib_node_index_t fib_path_create_special(fib_node_index_t pl_index, - fib_protocol_t nh_proto, + dpo_proto_t nh_proto, fib_path_cfg_flags_t flags, const dpo_id_t *dpo); @@ -148,7 +155,7 @@ extern int fib_path_is_recursive_constrained(fib_node_index_t path_index); extern int fib_path_is_exclusive(fib_node_index_t path_index); extern int fib_path_is_deag(fib_node_index_t path_index); extern int fib_path_is_looped(fib_node_index_t path_index); -extern fib_protocol_t fib_path_get_proto(fib_node_index_t path_index); +extern dpo_proto_t fib_path_get_proto(fib_node_index_t path_index); extern void fib_path_destroy(fib_node_index_t path_index); extern uword fib_path_hash(fib_node_index_t path_index); extern load_balance_path_t * fib_path_append_nh_for_multipath_hash( diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c index 26f2b9b6..4438671b 100644 --- a/src/vnet/fib/fib_path_ext.c +++ b/src/vnet/fib/fib_path_ext.c @@ -191,6 +191,9 @@ fib_path_ext_stack (fib_path_ext_t *path_ext, case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: parent_fct = child_fct; break; + case FIB_FORW_CHAIN_TYPE_ETHERNET: + parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS; + break; default: return (nhs); break; diff --git a/src/vnet/fib/fib_path_list.c b/src/vnet/fib/fib_path_list.c index 7a9c328c..f30fd7ea 100644 --- a/src/vnet/fib/fib_path_list.c +++ b/src/vnet/fib/fib_path_list.c @@ -611,7 +611,7 @@ fib_path_list_get_resolving_interface (fib_node_index_t path_list_index) return (sw_if_index); } -fib_protocol_t +dpo_proto_t fib_path_list_get_proto (fib_node_index_t path_list_index) { fib_path_list_t *path_list; @@ -753,7 +753,7 @@ fib_path_list_flags_2_path_flags (fib_path_list_flags_t plf) } fib_node_index_t -fib_path_list_create_special (fib_protocol_t nh_proto, +fib_path_list_create_special (dpo_proto_t nh_proto, fib_path_list_flags_t flags, const dpo_id_t *dpo) { diff --git a/src/vnet/fib/fib_path_list.h b/src/vnet/fib/fib_path_list.h index b4b6985b..a54b79e2 100644 --- a/src/vnet/fib/fib_path_list.h +++ b/src/vnet/fib/fib_path_list.h @@ -106,7 +106,7 @@ typedef enum fib_path_list_flags_t_ { extern fib_node_index_t fib_path_list_create(fib_path_list_flags_t flags, const fib_route_path_t *paths); -extern fib_node_index_t fib_path_list_create_special(fib_protocol_t nh_proto, +extern fib_node_index_t fib_path_list_create_special(dpo_proto_t nh_proto, fib_path_list_flags_t flags, const dpo_id_t *dpo); @@ -150,7 +150,7 @@ extern int fib_path_list_recursive_loop_detect(fib_node_index_t path_list_index, extern u32 fib_path_list_get_resolving_interface(fib_node_index_t path_list_index); extern int fib_path_list_is_looped(fib_node_index_t path_list_index); extern int 
fib_path_list_is_popular(fib_node_index_t path_list_index); -extern fib_protocol_t fib_path_list_get_proto(fib_node_index_t path_list_index); +extern dpo_proto_t fib_path_list_get_proto(fib_node_index_t path_list_index); extern u8 * fib_path_list_format(fib_node_index_t pl_index, u8 * s); extern index_t fib_path_list_lb_map_add_or_lock(fib_node_index_t pl_index, diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index 5aa02dd0..6b6cc5cb 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -505,7 +505,7 @@ fib_table_entry_path_add (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -664,7 +664,7 @@ void fib_table_entry_path_remove (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -755,7 +755,7 @@ fib_table_entry_update_one_path (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h index a65fea74..579740e9 100644 --- a/src/vnet/fib/fib_table.h +++ b/src/vnet/fib/fib_table.h @@ -288,7 +288,7 @@ extern fib_node_index_t fib_table_entry_path_add(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -364,7 +364,7 @@ extern fib_node_index_t fib_table_entry_path_add2(u32 fib_index, extern void fib_table_entry_path_remove(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -471,7 +471,7 @@ extern fib_node_index_t fib_table_entry_update_one_path(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index 4c891667..59d5da2a 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -833,7 +833,7 @@ fib_test_v4 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -861,7 +861,7 @@ fib_test_v4 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -911,7 +911,7 @@ fib_test_v4 (void) fib_table_entry_path_add(fib_index, &pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -963,7 +963,7 @@ fib_test_v4 (void) pfx.fp_len = 0; fib_table_entry_path_remove(fib_index, &pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // non-recursive path, so no FIB index @@ -1029,7 +1029,7 @@ fib_test_v4 (void) &pfx_11_11_11_11_s_32, 
FIB_SOURCE_API, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1095,7 +1095,7 @@ fib_test_v4 (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1110,7 +1110,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_11_11_11_11_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1144,7 +1144,7 @@ fib_test_v4 (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1181,7 +1181,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1214,7 +1214,7 @@ fib_test_v4 (void) &pfx_1_1_2_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1241,7 +1241,7 @@ fib_test_v4 (void) &pfx_1_1_2_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1280,7 +1280,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_2_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -1327,7 +1327,7 @@ fib_test_v4 (void) &bgp_100_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, // no index provided. fib_index, // nexthop in same fib as route @@ -1363,7 +1363,7 @@ fib_test_v4 (void) &bgp_101_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, // no index provided. fib_index, // nexthop in same fib as route @@ -1487,7 +1487,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. 
fib_index, // nexthop in same fib as route @@ -1534,7 +1534,7 @@ fib_test_v4 (void) &pfx_1_2_3_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -1545,7 +1545,7 @@ fib_test_v4 (void) &pfx_1_2_3_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, @@ -1586,7 +1586,7 @@ fib_test_v4 (void) &pfx_1_2_3_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, @@ -1597,7 +1597,7 @@ fib_test_v4 (void) &pfx_1_2_3_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -1669,7 +1669,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1688,7 +1688,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1770,7 +1770,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -1915,7 +1915,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -1995,7 +1995,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2026,7 +2026,7 @@ fib_test_v4 (void) &bgp_44_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_2_3_4_s_32.fp_addr, ~0, fib_index, @@ -2037,7 +2037,7 @@ fib_test_v4 (void) &bgp_44_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_2_3_5_s_32.fp_addr, ~0, fib_index, @@ -2107,7 +2107,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_200_s_32.fp_addr, ~0, // no index provided. 
fib_index, // nexthop in same fib as route @@ -2151,7 +2151,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2209,7 +2209,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_28, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2244,7 +2244,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_0_s_28, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -2275,7 +2275,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2316,7 +2316,7 @@ fib_test_v4 (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2351,7 +2351,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -2362,7 +2362,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -2375,7 +2375,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_200_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2405,7 +2405,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2446,7 +2446,7 @@ fib_test_v4 (void) &bgp_102, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route @@ -2457,7 +2457,7 @@ fib_test_v4 (void) &bgp_102, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2483,7 +2483,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_102, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2492,7 +2492,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_102, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2507,7 +2507,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_100_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2516,7 +2516,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_101_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2546,7 +2546,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // no index provided. fib_index, // Same as route's FIB @@ -2593,7 +2593,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, ~0, // no index provided. 
fib_index, @@ -2639,7 +2639,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // no index provided. fib_index, // same as route's FIB @@ -2648,7 +2648,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, ~0, // no index provided. fib_index, // same as route's FIB @@ -2707,7 +2707,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2718,7 +2718,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_7_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2729,7 +2729,7 @@ fib_test_v4 (void) &pfx_5_5_5_7_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_5_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2768,7 +2768,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2801,7 +2801,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2826,7 +2826,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2868,7 +2868,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2892,7 +2892,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_5_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2901,7 +2901,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_7_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2910,7 +2910,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_7_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_5_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2919,7 +2919,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, ~0, // no index provided. fib_index, // same as route's FIB @@ -2943,7 +2943,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2957,7 +2957,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. 
fib_index, // same as route's FIB @@ -2991,7 +2991,7 @@ fib_test_v4 (void) &pfx_23_23_23_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_23_23_23_23_s_32.fp_addr, ~0, // recursive fib_index, @@ -3021,7 +3021,7 @@ fib_test_v4 (void) &pfx_0_0_0_0_s_0, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_23_23_23_23_s_32.fp_addr, ~0, // recursive fib_index, @@ -3051,7 +3051,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3081,7 +3081,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_28, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3099,7 +3099,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3116,7 +3116,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3140,7 +3140,7 @@ fib_test_v4 (void) &pfx_1_1_1_3_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3152,7 +3152,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3177,7 +3177,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3188,7 +3188,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3238,7 +3238,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3277,7 +3277,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3307,7 +3307,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3320,7 +3320,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3354,7 +3354,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3391,7 +3391,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3412,7 +3412,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3421,7 +3421,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3430,7 +3430,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, 
&pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3481,7 +3481,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3492,7 +3492,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -3503,7 +3503,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, ~0, @@ -3539,7 +3539,7 @@ fib_test_v4 (void) for (ii = 0; ii < 4; ii++) { fib_route_path_t r_path = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_addr = { .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02 + ii), }, @@ -3588,7 +3588,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, fib_index, @@ -3648,7 +3648,7 @@ fib_test_v4 (void) &pfx_34_34_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, 0, @@ -3659,7 +3659,7 @@ fib_test_v4 (void) &pfx_34_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_34_34_1_1_s_32.fp_addr, ~0, fib_index, @@ -3670,7 +3670,7 @@ fib_test_v4 (void) &pfx_34_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_34_34_1_1_s_32.fp_addr, ~0, fib_index, @@ -3691,7 +3691,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_2_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3700,7 +3700,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3709,7 +3709,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_2_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3751,7 +3751,7 @@ fib_test_v4 (void) &pfx_4_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, tm->hw[0]->sw_if_index, fib_index, @@ -3805,7 +3805,7 @@ fib_test_v4 (void) &pfx_2001_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, fib_index, @@ -3863,7 +3863,7 @@ fib_test_v4 (void) &pfx_12_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_12_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3897,7 +3897,7 @@ fib_test_v4 (void) &pfx_10_10_10_127_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_127_s_32.fp_addr, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -3945,7 +3945,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -3956,7 +3956,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, fib_index, @@ -3975,7 +3975,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -3992,7 +3992,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, 
FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -4011,7 +4011,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, fib_index, @@ -4030,7 +4030,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -4269,7 +4269,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4300,7 +4300,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -4345,7 +4345,7 @@ fib_test_v6 (void) fib_table_entry_path_add(fib_index, &pfx_0_0, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4389,7 +4389,7 @@ fib_test_v6 (void) */ fib_table_entry_path_remove(fib_index, &pfx_0_0, FIB_SOURCE_API, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4466,7 +4466,7 @@ fib_test_v6 (void) &pfx_2001_1_2_s_128, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx_2001_1_2_s_128.fp_addr, tm->hw[0]->sw_if_index, ~0, @@ -4505,7 +4505,7 @@ fib_test_v6 (void) &pfx_2001_1_3_s_128, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx_2001_1_3_s_128.fp_addr, tm->hw[0]->sw_if_index, ~0, @@ -4559,7 +4559,7 @@ fib_test_v6 (void) &pfx_2001_a_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4573,7 +4573,7 @@ fib_test_v6 (void) &pfx_2001_b_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4608,7 +4608,7 @@ fib_test_v6 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4646,7 +4646,7 @@ fib_test_v6 (void) &pfx_2001_c_s_64, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4663,7 +4663,7 @@ fib_test_v6 (void) fib_table_entry_path_remove(fib_index, &pfx_2001_c_s_64, FIB_SOURCE_CLI, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4748,7 +4748,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[1]->sw_if_index, ~0, @@ -4767,7 +4767,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5095,7 +5095,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -5111,7 +5111,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5140,7 +5140,7 @@ fib_test_ae (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, 
tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5167,7 +5167,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5209,7 +5209,7 @@ fib_test_ae (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5243,7 +5243,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5280,7 +5280,7 @@ fib_test_ae (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_3_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5352,7 +5352,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5375,7 +5375,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5407,7 +5407,7 @@ fib_test_ae (void) &pfx_10_0_0_0_s_8, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_3_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5463,7 +5463,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5499,7 +5499,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5538,7 +5538,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -5626,7 +5626,7 @@ fib_test_pref (void) * 2 high, 2 medium and 2 low preference non-recursive paths */ fib_route_path_t nr_path_hi_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5637,7 +5637,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_hi_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5648,7 +5648,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_med_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5659,7 +5659,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_med_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5670,7 +5670,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_low_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5681,7 +5681,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_low_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5897,7 +5897,7 @@ fib_test_pref (void) }, }; fib_route_path_t r_path_hi = { - .frp_proto = 
FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -5906,7 +5906,7 @@ fib_test_pref (void) .frp_addr = pfx_1_1_1_1_s_32.fp_addr, }; fib_route_path_t r_path_med = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -5915,7 +5915,7 @@ fib_test_pref (void) .frp_addr = pfx_1_1_1_2_s_32.fp_addr, }; fib_route_path_t r_path_low = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -6099,7 +6099,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -6115,7 +6115,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6145,7 +6145,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[1]->sw_if_index, ~0, @@ -6161,7 +6161,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6243,7 +6243,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6282,7 +6282,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6360,7 +6360,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_2, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6440,7 +6440,7 @@ fib_test_label (void) &pfx_2_2_2_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -6612,7 +6612,7 @@ fib_test_label (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6669,7 +6669,7 @@ fib_test_label (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6711,7 +6711,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6842,7 +6842,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6884,7 +6884,7 @@ fib_test_label (void) &pfx_2_2_2_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -6912,7 +6912,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6945,7 +6945,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, 
tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6987,7 +6987,7 @@ fib_test_label (void) &pfx_2_2_2_3_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -7031,7 +7031,7 @@ fib_test_label (void) &pfx_2_2_2_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -7081,7 +7081,7 @@ fib_test_label (void) &pfx_2_2_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -7689,7 +7689,7 @@ fib_test_bfd (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7706,7 +7706,7 @@ fib_test_bfd (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7780,7 +7780,7 @@ fib_test_bfd (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7819,7 +7819,7 @@ fib_test_bfd (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7851,7 +7851,7 @@ fib_test_bfd (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7907,7 +7907,7 @@ fib_test_bfd (void) &pfx_200_0_0_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, ~0, // recursive 0, // default fib index @@ -7926,7 +7926,7 @@ fib_test_bfd (void) &pfx_200_0_0_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // recursive 0, // default fib index @@ -8065,7 +8065,7 @@ fib_test_bfd (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8096,7 +8096,7 @@ fib_test_bfd (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8234,7 +8234,7 @@ lfib_test (void) &pfx, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, fib_index, @@ -8285,7 +8285,7 @@ lfib_test (void) &pfx, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, lfib_index, @@ -8363,7 +8363,7 @@ lfib_test (void) &pfx_1200, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8389,7 +8389,7 @@ lfib_test (void) }, }; fib_route_path_t *rpaths = NULL, rpath = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_local_label = 1200, .frp_eos = MPLS_NON_EOS, .frp_sw_if_index = ~0, // recurive @@ -8545,7 +8545,7 @@ lfib_test (void) &pfx_2500, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8590,7 +8590,7 @@ lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8610,7 +8610,7 @@ 
lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8637,7 +8637,7 @@ lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, 5, // rpf-id 0, // default table diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h index a209ff3c..f11a55da 100644 --- a/src/vnet/fib/fib_types.h +++ b/src/vnet/fib/fib_types.h @@ -32,9 +32,9 @@ typedef u32 fib_node_index_t; * Protocol Type. packed so it consumes a u8 only */ typedef enum fib_protocol_t_ { - FIB_PROTOCOL_IP4 = 0, - FIB_PROTOCOL_IP6, - FIB_PROTOCOL_MPLS, + FIB_PROTOCOL_IP4 = DPO_PROTO_IP4, + FIB_PROTOCOL_IP6 = DPO_PROTO_IP6, + FIB_PROTOCOL_MPLS = DPO_PROTO_MPLS, } __attribute__ ((packed)) fib_protocol_t; #define FIB_PROTOCOLS { \ @@ -338,7 +338,7 @@ typedef struct fib_route_path_t_ { * The protocol of the address below. We need this since the all * zeros address is ambiguous. */ - fib_protocol_t frp_proto; + dpo_proto_t frp_proto; union { /** diff --git a/src/vnet/interface_format.c b/src/vnet/interface_format.c index df7e9388..5694bb2f 100644 --- a/src/vnet/interface_format.c +++ b/src/vnet/interface_format.c @@ -165,9 +165,15 @@ format_vnet_sw_if_index_name (u8 * s, va_list * args) { vnet_main_t *vnm = va_arg (*args, vnet_main_t *); u32 sw_if_index = va_arg (*args, u32); - return format (s, "%U", - format_vnet_sw_interface_name, vnm, - vnet_get_sw_interface (vnm, sw_if_index)); + vnet_sw_interface_t *si; + + si = vnet_get_sw_interface_safe (vnm, sw_if_index); + + if (NULL == si) + { + return format (s, "DELETED"); + } + return format (s, "%U", format_vnet_sw_interface_name, vnm, si); } u8 * diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index ee17ea88..7a8d7a0c 100755 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -724,7 +724,7 @@ ip4_add_interface_routes (u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, /* No next-hop address */ NULL, sw_if_index, @@ -767,7 +767,7 @@ ip4_add_interface_routes (u32 sw_if_index, fib_table_entry_update_one_path (fib_index, &net_pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &net_pfx.fp_addr, sw_if_index, // invalid FIB index @@ -803,7 +803,7 @@ ip4_add_interface_routes (u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx.fp_addr, sw_if_index, // invalid FIB index diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index bc66416e..8ae08a01 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -355,7 +355,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, /* No next-hop address */ NULL, sw_if_index, /* invalid FIB index */ @@ -390,7 +390,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index e8eebd4e..6a9139ab 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -284,7 +284,7 @@ ip6_neighbor_sw_interface_up_down (vnet_main_t * vnm, 
(ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index), &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); pool_put (nm->neighbor_pool, n); @@ -645,7 +645,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, n->fib_entry_index = fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, &pfx.fp_addr, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } @@ -776,7 +776,7 @@ vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm, (ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index), &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); } pool_put (nm->neighbor_pool, n); @@ -4110,7 +4110,7 @@ ip6_neighbor_proxy_add_del (u32 sw_if_index, ip6_address_t * addr, u8 is_del) fib_table_entry_path_remove (fib_index, &pfx, FIB_SOURCE_IP6_ND_PROXY, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh, sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); @@ -4124,7 +4124,7 @@ ip6_neighbor_proxy_add_del (u32 sw_if_index, ip6_address_t * addr, u8 is_del) &pfx, FIB_SOURCE_IP6_ND_PROXY, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh, sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index 4cbf75a3..0676a387 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -156,9 +156,9 @@ copy_fib_next_hop (fib_route_path_encode_t * api_rpath, void *fp_arg) int is_ip4; vl_api_fib_path_t *fp = (vl_api_fib_path_t *) fp_arg; - if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP4) + if (api_rpath->rpath.frp_proto == DPO_PROTO_IP4) fp->afi = IP46_TYPE_IP4; - else if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP6) + else if (api_rpath->rpath.frp_proto == DPO_PROTO_IP6) fp->afi = IP46_TYPE_IP6; else { @@ -714,7 +714,7 @@ add_del_route_t_handler (u8 is_multipath, u8 is_rpf_id, u32 fib_index, const fib_prefix_t * prefix, - u8 next_hop_proto_is_ip4, + dpo_proto_t next_hop_proto, const ip46_address_t * next_hop, u32 next_hop_sw_if_index, u8 next_hop_fib_index, @@ -726,8 +726,7 @@ add_del_route_t_handler (u8 is_multipath, vnet_classify_main_t *cm = &vnet_classify_main; fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE; fib_route_path_t path = { - .frp_proto = (next_hop_proto_is_ip4 ? - FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6), + .frp_proto = next_hop_proto, .frp_addr = (NULL == next_hop ? 
zero_addr : *next_hop), .frp_sw_if_index = next_hop_sw_if_index, .frp_fib_index = next_hop_fib_index, @@ -740,7 +739,7 @@ add_del_route_t_handler (u8 is_multipath, if (MPLS_LABEL_INVALID != next_hop_via_label) { - path.frp_proto = FIB_PROTOCOL_MPLS; + path.frp_proto = DPO_PROTO_MPLS; path.frp_local_label = next_hop_via_label; path.frp_eos = MPLS_NON_EOS; } @@ -855,7 +854,7 @@ int add_del_route_check (fib_protocol_t table_proto, u32 table_id, u32 next_hop_sw_if_index, - fib_protocol_t next_hop_table_proto, + dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, u8 create_missing_tables, u8 is_rpf_id, u32 * fib_index, u32 * next_hop_fib_index) @@ -887,11 +886,18 @@ add_del_route_check (fib_protocol_t table_proto, } else { + fib_protocol_t fib_nh_proto; + + if (next_hop_table_proto > DPO_PROTO_MPLS) + return (0); + + fib_nh_proto = dpo_proto_to_fib (next_hop_table_proto); + if (is_rpf_id) - *next_hop_fib_index = mfib_table_find (next_hop_table_proto, + *next_hop_fib_index = mfib_table_find (fib_nh_proto, ntohl (next_hop_table_id)); else - *next_hop_fib_index = fib_table_find (next_hop_table_proto, + *next_hop_fib_index = fib_table_find (fib_nh_proto, ntohl (next_hop_table_id)); if (~0 == *next_hop_fib_index) @@ -900,12 +906,12 @@ add_del_route_check (fib_protocol_t table_proto, { if (is_rpf_id) *next_hop_fib_index = - mfib_table_find_or_create_and_lock (next_hop_table_proto, + mfib_table_find_or_create_and_lock (fib_nh_proto, ntohl (next_hop_table_id)); else *next_hop_fib_index = - fib_table_find_or_create_and_lock (next_hop_table_proto, + fib_table_find_or_create_and_lock (fib_nh_proto, ntohl (next_hop_table_id)); } @@ -930,7 +936,7 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) rv = add_del_route_check (FIB_PROTOCOL_IP4, mp->table_id, mp->next_hop_sw_if_index, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, mp->next_hop_table_id, mp->create_vrf_if_needed, 0, &fib_index, &next_hop_fib_index); @@ -970,7 +976,7 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->classify_table_index, mp->is_resolve_host, mp->is_resolve_attached, 0, 0, - fib_index, &pfx, 1, + fib_index, &pfx, DPO_PROTO_IP4, &nh, ntohl (mp->next_hop_sw_if_index), next_hop_fib_index, @@ -990,7 +996,7 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) rv = add_del_route_check (FIB_PROTOCOL_IP6, mp->table_id, mp->next_hop_sw_if_index, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, mp->next_hop_table_id, mp->create_vrf_if_needed, 0, &fib_index, &next_hop_fib_index); @@ -1030,7 +1036,7 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->classify_table_index, mp->is_resolve_host, mp->is_resolve_attached, 0, 0, - fib_index, &pfx, 0, + fib_index, &pfx, DPO_PROTO_IP6, &nh, ntohl (mp->next_hop_sw_if_index), next_hop_fib_index, mp->next_hop_weight, @@ -1106,7 +1112,7 @@ mroute_add_del_handler (u8 is_add, fib_route_path_t path = { .frp_sw_if_index = next_hop_sw_if_index, - .frp_proto = prefix->fp_proto, + .frp_proto = fib_proto_to_dpo (prefix->fp_proto), }; if (is_local) diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c index 533d010a..41e46070 100755 --- a/src/vnet/ip/lookup.c +++ b/src/vnet/ip/lookup.c @@ -423,7 +423,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_eos = MPLS_NON_EOS; - rpath.frp_proto = FIB_PROTOCOL_MPLS; + rpath.frp_proto = DPO_PROTO_MPLS; rpath.frp_sw_if_index = ~0; vec_add1 (rpaths, rpath); } @@ -449,7 +449,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + 
rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } @@ -460,7 +460,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "weight %u", &weight)) @@ -479,7 +479,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -488,7 +488,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -501,7 +501,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -510,13 +510,13 @@ vnet_ip_route_cmd (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "lookup in table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = pfx.fp_proto; + rpath.frp_proto = fib_proto_to_dpo (pfx.fp_proto); rpath.frp_sw_if_index = ~0; vec_add1 (rpaths, rpath); } @@ -526,7 +526,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = prefixs[0].fp_proto; + rpath.frp_proto = fib_proto_to_dpo (prefixs[0].fp_proto); vec_add1 (rpaths, rpath); } else if (vec_len (prefixs) > 0 && diff --git a/src/vnet/lisp-gpe/lisp_gpe.c b/src/vnet/lisp-gpe/lisp_gpe.c index 0acc7349..018895ad 100644 --- a/src/vnet/lisp-gpe/lisp_gpe.c +++ b/src/vnet/lisp-gpe/lisp_gpe.c @@ -454,7 +454,7 @@ vnet_gpe_add_del_native_fwd_rpath (vnet_gpe_native_fwd_rpath_args_t * a) fib_route_path_t *rpath; u8 ip_version; - ip_version = a->rpath.frp_proto == FIB_PROTOCOL_IP4 ? IP4 : IP6; + ip_version = a->rpath.frp_proto == DPO_PROTO_IP4 ? 
IP4 : IP6; if (a->is_add) { @@ -511,7 +511,7 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U %U", unformat_ip6_address, @@ -520,21 +520,21 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip4_address, &rpath.frp_addr.ip4)) { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U", unformat_ip6_address, &rpath.frp_addr.ip6)) { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else { @@ -549,7 +549,8 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, } else { - rpath.frp_fib_index = fib_table_find (rpath.frp_proto, table_id); + rpath.frp_fib_index = + fib_table_find (dpo_proto_to_fib (rpath.frp_proto), table_id); if ((u32) ~ 0 == rpath.frp_fib_index) { error = clib_error_return (0, "Nonexistent table id %d", table_id); diff --git a/src/vnet/lisp-gpe/lisp_gpe_api.c b/src/vnet/lisp-gpe/lisp_gpe_api.c index f1663699..4367a719 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_api.c +++ b/src/vnet/lisp-gpe/lisp_gpe_api.c @@ -455,10 +455,10 @@ static void clib_memcpy (&a->rpath.frp_addr.ip6, mp->nh_addr, sizeof (ip6_address_t)); a->is_add = mp->is_add; - a->rpath.frp_proto = mp->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6; - a->rpath.frp_fib_index = fib_table_find (a->rpath.frp_proto, - clib_net_to_host_u32 - (mp->table_id)); + a->rpath.frp_proto = mp->is_ip4 ? DPO_PROTO_IP4 : DPO_PROTO_IP6; + a->rpath.frp_fib_index = + fib_table_find (dpo_proto_to_fib (a->rpath.frp_proto), + clib_net_to_host_u32 (mp->table_id)); if (~0 == a->rpath.frp_fib_index) { rv = VNET_API_ERROR_INVALID_VALUE; @@ -484,7 +484,7 @@ gpe_native_fwd_rpaths_copy (vl_api_gpe_native_fwd_rpath_t * dst, vec_foreach (e, src) { memset (&dst[i], 0, sizeof (*dst)); - table = fib_table_get (e->frp_fib_index, e->frp_proto); + table = fib_table_get (e->frp_fib_index, dpo_proto_to_fib (e->frp_proto)); dst[i].fib_index = table->ft_table_id; dst[i].nh_sw_if_index = e->frp_sw_if_index; dst[i].is_ip4 = is_ip4; diff --git a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c index 395b493a..ac048149 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c +++ b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c @@ -225,6 +225,7 @@ lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths) { const lisp_gpe_adjacency_t *ladj; fib_route_path_t *rpaths = NULL; + fib_protocol_t fp; u8 best_priority; u32 ii; @@ -239,9 +240,9 @@ lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths) ladj = lisp_gpe_adjacency_get (paths[ii].lisp_adj); - ip_address_to_46 (&ladj->remote_rloc, - &rpaths[ii].frp_addr, &rpaths[ii].frp_proto); + ip_address_to_46 (&ladj->remote_rloc, &rpaths[ii].frp_addr, &fp); + rpaths[ii].frp_proto = fib_proto_to_dpo (fp); rpaths[ii].frp_sw_if_index = ladj->sw_if_index; rpaths[ii].frp_weight = (paths[ii].weight ? 
paths[ii].weight : 1); } diff --git a/src/vnet/mfib/ip6_mfib.c b/src/vnet/mfib/ip6_mfib.c index 5c6f8126..5e48e919 100644 --- a/src/vnet/mfib/ip6_mfib.c +++ b/src/vnet/mfib/ip6_mfib.c @@ -158,7 +158,7 @@ ip6_create_mfib_with_table_id (u32 table_id) .fp_proto = FIB_PROTOCOL_IP6, }; const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -222,7 +222,7 @@ ip6_mfib_table_destroy (ip6_mfib_t *mfib) .fp_proto = FIB_PROTOCOL_IP6, }; const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -259,7 +259,7 @@ void ip6_mfib_interface_enable_disable (u32 sw_if_index, int is_enable) { const fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = sw_if_index, .frp_fib_index = ~0, diff --git a/src/vnet/mfib/mfib_entry.c b/src/vnet/mfib/mfib_entry.c index cf25b67a..b37f8825 100644 --- a/src/vnet/mfib/mfib_entry.c +++ b/src/vnet/mfib/mfib_entry.c @@ -764,18 +764,16 @@ mfib_entry_update (fib_node_index_t mfib_entry_index, * entry */ fib_node_index_t old_pl_index; - fib_protocol_t fp; + dpo_proto_t dp; dpo_id_t dpo = DPO_INVALID; - fp = mfib_entry_get_proto(mfib_entry); + dp = fib_proto_to_dpo(mfib_entry_get_proto(mfib_entry)); old_pl_index = msrc->mfes_pl; - dpo_set(&dpo, DPO_REPLICATE, - fib_proto_to_dpo(fp), - repi); + dpo_set(&dpo, DPO_REPLICATE, dp, repi); msrc->mfes_pl = - fib_path_list_create_special(fp, + fib_path_list_create_special(dp, FIB_PATH_LIST_FLAG_EXCLUSIVE, &dpo); diff --git a/src/vnet/mfib/mfib_test.c b/src/vnet/mfib/mfib_test.c index 7c92ae99..57787eca 100644 --- a/src/vnet/mfib/mfib_test.c +++ b/src/vnet/mfib/mfib_test.c @@ -387,7 +387,7 @@ mfib_test_i (fib_protocol_t PROTO, fib_route_path_t path_via_if0 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, @@ -411,7 +411,7 @@ mfib_test_i (fib_protocol_t PROTO, MFIB_ITF_FLAG_ACCEPT)); fib_route_path_t path_via_if1 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, @@ -419,7 +419,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_via_if2 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, @@ -427,7 +427,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_via_if3 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[3]->sw_if_index, .frp_fib_index = ~0, @@ -435,7 +435,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_for_us = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -1121,7 +1121,7 @@ mfib_test_i (fib_protocol_t PROTO, &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1138,7 +1138,7 @@ mfib_test_i (fib_protocol_t PROTO, * An (S,G) that resolves via the mLDP head-end */ fib_route_path_t path_via_mldp = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, 
.frp_local_label = pfx_3500.fp_label, .frp_eos = MPLS_EOS, .frp_sw_if_index = 0xffffffff, diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api index 67f1045d..5973a0a6 100644 --- a/src/vnet/mpls/mpls.api +++ b/src/vnet/mpls/mpls.api @@ -156,7 +156,7 @@ manual_endian manual_print define mpls_tunnel_details @param mr_is_interface_rx - Interface Receive path @param mr_is_interface_rx - RPF-ID Receive path. The next-hop interface is used as the RPF-ID - @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mr_next_hop_proto - The next-hop protocol, of type dpo_proto_t @param mr_next_hop_weight - The weight, for UCMP @param mr_next_hop[16] - the nextop address @param mr_next_hop_sw_if_index - the next-hop SW interface @@ -182,7 +182,7 @@ autoreply define mpls_route_add_del u8 mr_is_resolve_attached; u8 mr_is_interface_rx; u8 mr_is_rpf_id; - u8 mr_next_hop_proto_is_ip4; + u8 mr_next_hop_proto; u8 mr_next_hop_weight; u8 mr_next_hop_preference; u8 mr_next_hop[16]; diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c index 068d31f4..266ba42c 100644 --- a/src/vnet/mpls/mpls.c +++ b/src/vnet/mpls/mpls.c @@ -261,7 +261,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index, &rpath.frp_weight)) { - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } @@ -272,7 +272,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index, &rpath.frp_weight)) { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } @@ -283,7 +283,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "rx-ip4 %U", @@ -291,7 +291,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; rpath.frp_flags = FIB_ROUTE_PATH_INTF_RX; vec_add1(rpaths, rpath); } @@ -302,7 +302,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -312,7 +312,7 @@ vnet_mpls_local_label (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -322,7 +322,7 @@ vnet_mpls_local_label (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -336,7 +336,7 @@ vnet_mpls_local_label (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -346,7 +346,7 @@ vnet_mpls_local_label (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "%d", &local_label)) @@ -355,7 +355,7 @@ vnet_mpls_local_label (vlib_main_t * vm, "ip4-lookup-in-table %d", &rpath.frp_fib_index)) { - 
rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; pfx.fp_payload_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); @@ -364,7 +364,7 @@ vnet_mpls_local_label (vlib_main_t * vm, "ip6-lookup-in-table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; vec_add1(rpaths, rpath); pfx.fp_payload_proto = DPO_PROTO_IP6; @@ -373,11 +373,21 @@ vnet_mpls_local_label (vlib_main_t * vm, "mpls-lookup-in-table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = FIB_PROTOCOL_MPLS; + rpath.frp_proto = DPO_PROTO_MPLS; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; pfx.fp_payload_proto = DPO_PROTO_MPLS; vec_add1(rpaths, rpath); } + else if (unformat (line_input, + "l2-input-on %U", + unformat_vnet_sw_interface, vnm, + &rpath.frp_sw_if_index)) + { + rpath.frp_proto = DPO_PROTO_ETHERNET; + pfx.fp_payload_proto = DPO_PROTO_ETHERNET; + rpath.frp_flags = FIB_ROUTE_PATH_INTF_RX; + vec_add1(rpaths, rpath); + } else if (unformat (line_input, "out-label %U", unformat_mpls_unicast_label, &out_label)) @@ -440,7 +450,7 @@ vnet_mpls_local_label (vlib_main_t * vm, pfx.fp_proto = FIB_PROTOCOL_MPLS; pfx.fp_len = 21; pfx.fp_label = local_label; - pfx.fp_payload_proto = fib_proto_to_dpo(rpaths[0].frp_proto); + pfx.fp_payload_proto = rpaths[0].frp_proto; /* * the CLI parsing stored table Ids, swap to FIB indicies diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c index 92fb24a6..737299e6 100644 --- a/src/vnet/mpls/mpls_api.c +++ b/src/vnet/mpls/mpls_api.c @@ -144,14 +144,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, }; if (pfx.fp_eos) { - if (mp->mr_next_hop_proto_is_ip4) - { - pfx.fp_payload_proto = DPO_PROTO_IP4; - } - else - { - pfx.fp_payload_proto = DPO_PROTO_IP6; - } + pfx.fp_payload_proto = mp->mr_next_hop_proto; } else { @@ -161,7 +154,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, rv = add_del_route_check (FIB_PROTOCOL_MPLS, mp->mr_table_id, mp->mr_next_hop_sw_if_index, - dpo_proto_to_fib (pfx.fp_payload_proto), + pfx.fp_payload_proto, mp->mr_next_hop_table_id, mp->mr_create_table_if_needed, mp->mr_is_rpf_id, @@ -173,9 +166,9 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, ip46_address_t nh; memset (&nh, 0, sizeof (nh)); - if (mp->mr_next_hop_proto_is_ip4) + if (DPO_PROTO_IP4 == mp->mr_next_hop_proto) memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); - else + else if (DPO_PROTO_IP6 == mp->mr_next_hop_proto) memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); n_labels = mp->mr_next_hop_n_out_labels; @@ -202,7 +195,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, mp->mr_is_interface_rx, mp->mr_is_rpf_id, fib_index, &pfx, - mp->mr_next_hop_proto_is_ip4, + mp->mr_next_hop_proto, &nh, ntohl (mp->mr_next_hop_sw_if_index), next_hop_fib_index, mp->mr_next_hop_weight, @@ -243,13 +236,13 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) if (mp->mt_next_hop_proto_is_ip4) { - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; clib_memcpy (&rpath.frp_addr.ip4, mp->mt_next_hop, sizeof (rpath.frp_addr.ip4)); } else { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; clib_memcpy (&rpath.frp_addr.ip6, mp->mt_next_hop, sizeof (rpath.frp_addr.ip6)); } diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c index c025cc58..6452a60b 100644 --- a/src/vnet/mpls/mpls_tunnel.c +++ b/src/vnet/mpls/mpls_tunnel.c @@ -171,7 +171,7 @@ mpls_tunnel_mk_lb 
(mpls_tunnel_t *mt, vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list)); vec_reset_length(ctx.next_hops); - lb_proto = vnet_link_to_dpo_proto(linkt); + lb_proto = fib_forw_chain_type_to_dpo_proto(fct); fib_path_list_walk(mt->mt_path_list, mpls_tunnel_collect_forwarding, @@ -313,12 +313,34 @@ mpls_tunnel_restack (mpls_tunnel_t *mt) /* * walk all the adjacencies on the MPLS interface and restack them */ - FOR_EACH_FIB_PROTOCOL(proto) + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) { - adj_nbr_walk(mt->mt_sw_if_index, - proto, - mpls_adj_walk_cb, - NULL); + /* + * Stack a load-balance that drops, whilst we have no paths + */ + vnet_hw_interface_t * hi; + dpo_id_t dpo = DPO_INVALID; + + mpls_tunnel_mk_lb(mt, + VNET_LINK_MPLS, + FIB_FORW_CHAIN_TYPE_ETHERNET, + &dpo); + + hi = vnet_get_hw_interface(vnet_get_main(), mt->mt_hw_if_index); + dpo_stack_from_node(hi->tx_node_index, + &mt->mt_l2_lb, + &dpo); + dpo_reset(&dpo); + } + else + { + FOR_EACH_FIB_PROTOCOL(proto) + { + adj_nbr_walk(mt->mt_sw_if_index, + proto, + mpls_adj_walk_cb, + NULL); + } } } @@ -495,7 +517,7 @@ mpls_tunnel_tx (vlib_main_t * vm, b0 = vlib_get_buffer(vm, bi0); - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_lb.dpoi_index; if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -506,7 +528,7 @@ mpls_tunnel_tx (vlib_main_t * vm, vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, - bi0, mt->mt_l2_tx_arc); + bi0, mt->mt_l2_lb.dpoi_next_node); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); @@ -565,8 +587,7 @@ vnet_mpls_tunnel_del (u32 sw_if_index) if (FIB_NODE_INDEX_INVALID != mt->mt_path_list) fib_path_list_child_remove(mt->mt_path_list, mt->mt_sibling_index); - if (ADJ_INDEX_INVALID != mt->mt_l2_adj) - adj_unlock(mt->mt_l2_adj); + dpo_reset(&mt->mt_l2_lb); vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index); pool_put(mpls_tunnel_pool, mt); @@ -587,12 +608,13 @@ vnet_mpls_tunnel_create (u8 l2_only, memset (mt, 0, sizeof (*mt)); mti = mt - mpls_tunnel_pool; fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL); - mt->mt_l2_adj = ADJ_INDEX_INVALID; mt->mt_path_list = FIB_NODE_INDEX_INVALID; mt->mt_sibling_index = FIB_NODE_INDEX_INVALID; if (is_multicast) mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST; + if (l2_only) + mt->mt_flags |= MPLS_TUNNEL_FLAG_L2; /* * Create a new, or re=use and old, tunnel HW interface @@ -614,7 +636,7 @@ vnet_mpls_tunnel_create (u8 l2_only, mti, mpls_tunnel_hw_interface_class.index, mti); - hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index); + hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); } /* @@ -624,19 +646,6 @@ vnet_mpls_tunnel_create (u8 l2_only, vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0); mpls_tunnel_db[mt->mt_sw_if_index] = mti; - if (l2_only) - { - mt->mt_l2_adj = - adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list), - VNET_LINK_ETHERNET, - &zero_addr, - mt->mt_sw_if_index); - - mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), - hi->tx_node_index, - "adj-l2-midchain"); - } - return (mt->mt_sw_if_index); } @@ -803,7 +812,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U %U", @@ -813,7 +822,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto 
= DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip6_address, @@ -822,7 +831,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, rpath.frp_fib_index = 0; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip4_address, @@ -831,7 +840,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, rpath.frp_fib_index = 0; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "l2-only")) l2_only = 1; @@ -915,6 +924,14 @@ format_mpls_tunnel (u8 * s, va_list * args) s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts); s = format(s, "\n"); + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + { + s = format(s, " forwarding: %U\n", + format_fib_forw_chain_type, + FIB_FORW_CHAIN_TYPE_ETHERNET); + s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2); + } + return (s); } diff --git a/src/vnet/mpls/mpls_tunnel.h b/src/vnet/mpls/mpls_tunnel.h index 4cb0a860..285817c3 100644 --- a/src/vnet/mpls/mpls_tunnel.h +++ b/src/vnet/mpls/mpls_tunnel.h @@ -22,15 +22,20 @@ typedef enum mpls_tunnel_attribute_t_ { MPLS_TUNNEL_ATTRIBUTE_FIRST = 0, + /** + * @brief The tunnel is L2 only + */ + MPLS_TUNNEL_ATTRIBUTE_L2 = MPLS_TUNNEL_ATTRIBUTE_FIRST, /** * @brief The tunnel has an underlying multicast LSP */ - MPLS_TUNNEL_ATTRIBUTE_MCAST = MPLS_TUNNEL_ATTRIBUTE_FIRST, + MPLS_TUNNEL_ATTRIBUTE_MCAST, MPLS_TUNNEL_ATTRIBUTE_LAST = MPLS_TUNNEL_ATTRIBUTE_MCAST, } mpls_tunnel_attribute_t; #define MPLS_TUNNEL_ATTRIBUTES { \ [MPLS_TUNNEL_ATTRIBUTE_MCAST] = "multicast", \ + [MPLS_TUNNEL_ATTRIBUTE_L2] = "L2", \ } #define FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(_item) \ for (_item = MPLS_TUNNEL_ATTRIBUTE_FIRST; \ @@ -39,6 +44,7 @@ typedef enum mpls_tunnel_attribute_t_ typedef enum mpls_tunnel_flag_t_ { MPLS_TUNNEL_FLAG_NONE = 0, + MPLS_TUNNEL_FLAG_L2 = (1 << MPLS_TUNNEL_ATTRIBUTE_L2), MPLS_TUNNEL_FLAG_MCAST = (1 << MPLS_TUNNEL_ATTRIBUTE_MCAST), } __attribute__ ((packed)) mpls_tunnel_flags_t; @@ -60,14 +66,19 @@ typedef struct mpls_tunnel_t_ /** * @brief If the tunnel is an L2 tunnel, this is the link type ETHERNET - * adjacency + * load-balance + */ + dpo_id_t mt_l2_lb; + + /** + * @brief The HW interface index of the tunnel interfaces */ - adj_index_t mt_l2_adj; + u32 mt_hw_if_index; /** - * @brief on a L2 tunnel this is the VLIB arc from the L2-tx to the l2-midchain + * @brief The SW interface index of the tunnel interfaces */ - u32 mt_l2_tx_arc; + u32 mt_sw_if_index; /** * @brief The path-list over which the tunnel's destination is reachable @@ -83,23 +94,6 @@ typedef struct mpls_tunnel_t_ * A vector of path extensions o hold the label stack for each path */ fib_path_ext_list_t mt_path_exts; - - /** - * @brief Flag to indicate the tunnel is only for L2 traffic, that is - * this tunnel belongs in a bridge domain. 
- */ - u8 mt_l2_only; - - /** - * @brief The HW interface index of the tunnel interfaces - */ - u32 mt_hw_if_index; - - /** - * @brief The SW interface index of the tunnel interfaces - */ - u32 mt_sw_if_index; - } mpls_tunnel_t; /** diff --git a/src/vnet/srmpls/sr_mpls_policy.c b/src/vnet/srmpls/sr_mpls_policy.c index 5ebbc60d..db4ad2a7 100755 --- a/src/vnet/srmpls/sr_mpls_policy.c +++ b/src/vnet/srmpls/sr_mpls_policy.c @@ -75,7 +75,7 @@ create_sl (mpls_sr_policy_t * sr_policy, mpls_label_t * sl, u32 weight) segment_list->segments = vec_dup (sl); fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, @@ -203,7 +203,7 @@ sr_mpls_policy_del (mpls_label_t bsid, u32 index) segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, @@ -308,7 +308,7 @@ sr_mpls_policy_mod (mpls_label_t bsid, u32 index, u8 operation, mpls_eos_bit_t eos; fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, diff --git a/src/vnet/srmpls/sr_mpls_steering.c b/src/vnet/srmpls/sr_mpls_steering.c index 37707049..3a9aea2d 100755 --- a/src/vnet/srmpls/sr_mpls_steering.c +++ b/src/vnet/srmpls/sr_mpls_steering.c @@ -218,7 +218,7 @@ sr_mpls_steering_policy (int is_del, mpls_label_t bsid, u32 sr_policy_index, update_fib:; fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_local_label = sr_policy->bsid, .frp_eos = MPLS_EOS, .frp_sw_if_index = ~0, diff --git a/src/vnet/srv6/sr_steering.c b/src/vnet/srv6/sr_steering.c index a7903751..704adaa7 100755 --- a/src/vnet/srv6/sr_steering.c +++ b/src/vnet/srv6/sr_steering.c @@ -310,7 +310,7 @@ update_fib: table_id : 0)), &pfx, FIB_SOURCE_SR, FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, (ip46_address_t *) & sr_policy->bsid, ~0, sm->fib_table_ip6, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); @@ -327,7 +327,7 @@ update_fib: table_id : 0)), &pfx, FIB_SOURCE_SR, FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, (ip46_address_t *) & sr_policy->bsid, ~0, sm->fib_table_ip4, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c index 97bb1b15..462c79a0 100644 --- a/src/vnet/vxlan-gpe/vxlan_gpe.c +++ b/src/vnet/vxlan-gpe/vxlan_gpe.c @@ -638,7 +638,7 @@ int vnet_vxlan_gpe_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo(fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c index 1b3df2a8..dc973372 100644 --- a/src/vnet/vxlan/vxlan.c +++ b/src/vnet/vxlan/vxlan.c @@ -505,7 +505,7 @@ int vnet_vxlan_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo(fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vpp/app/vpe_cli.c b/src/vpp/app/vpe_cli.c index 94bdc84c..fcc496ad 100644 --- a/src/vpp/app/vpe_cli.c +++ b/src/vpp/app/vpe_cli.c @@ -98,7 +98,7 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, vec_add2 (rpaths, rpath, 1); - rpath->frp_proto = FIB_PROTOCOL_IP4; + rpath->frp_proto = 
DPO_PROTO_IP4; rpath->frp_addr = next_hops[i]; rpath->frp_sw_if_index = sw_if_index; rpath->frp_fib_index = ~0; diff --git a/test/test_bfd.py b/test/test_bfd.py index be42cdad..4cb6d379 100644 --- a/test/test_bfd.py +++ b/test/test_bfd.py @@ -20,7 +20,7 @@ from vpp_pg_interface import CaptureTimeoutError, is_ipv6_misc from vpp_lo_interface import VppLoInterface from util import ppp from vpp_papi_provider import UnexpectedApiReturnValueError -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto USEC_IN_SEC = 1000000 @@ -1678,12 +1678,12 @@ class BFDFIBTestCase(VppTestCase): ip_2001_s_64 = VppIpRoute(self, "2001::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DPO_PROTO_IP6)], is_ip6=1) ip_2002_s_64 = VppIpRoute(self, "2002::", 64, [VppRoutePath(self.pg0.remote_ip6, 0xffffffff, - is_ip6=1)], + proto=DPO_PROTO_IP6)], is_ip6=1) ip_2001_s_64.add_vpp_config() ip_2002_s_64.add_vpp_config() diff --git a/test/test_gre.py b/test/test_gre.py index 18b67dbd..1afc44fb 100644 --- a/test/test_gre.py +++ b/test/test_gre.py @@ -6,7 +6,7 @@ from logging import * from framework import VppTestCase, VppTestRunner from vpp_sub_interface import VppDot1QSubint from vpp_gre_interface import VppGreInterface, VppGre6Interface -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from vpp_papi_provider import L2_VTR_OP from scapy.packet import Raw @@ -516,11 +516,12 @@ class TestGRE(VppTestCase): gre_if.admin_up() gre_if.config_ip6() - route_via_tun = VppIpRoute(self, "4004::1", 128, - [VppRoutePath("0::0", - gre_if.sw_if_index, - is_ip6=1)], - is_ip6=1) + route_via_tun = VppIpRoute( + self, "4004::1", 128, + [VppRoutePath("0::0", + gre_if.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) route_via_tun.add_vpp_config() @@ -542,11 +543,12 @@ class TestGRE(VppTestCase): # # Add a route that resolves the tunnel's destination # - route_tun_dst = VppIpRoute(self, "1002::1", 128, - [VppRoutePath(self.pg2.remote_ip6, - self.pg2.sw_if_index, - is_ip6=1)], - is_ip6=1) + route_tun_dst = VppIpRoute( + self, "1002::1", 128, + [VppRoutePath(self.pg2.remote_ip6, + self.pg2.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) route_tun_dst.add_vpp_config() # diff --git a/test/test_ip6.py b/test/test_ip6.py index 593f6868..285ce181 100644 --- a/test/test_ip6.py +++ b/test/test_ip6.py @@ -8,7 +8,7 @@ from vpp_sub_interface import VppSubInterface, VppDot1QSubint from vpp_pg_interface import is_ipv6_misc from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \ VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \ - VppMplsRoute + VppMplsRoute, DpoProto from vpp_neighbor import find_nbr, VppNeighbor from scapy.packet import Raw @@ -490,7 +490,7 @@ class TestIPv6(TestIPv6ND): inet=AF_INET6)) def test_ns_duplicates(self): - """ ARP Duplicates""" + """ ND Duplicates""" # # Generate some hosts on the LAN @@ -537,7 +537,7 @@ class TestIPv6(TestIPv6ND): # # remove the duplicate on pg1 - # packet stream shoud generate ARPs out of pg1 + # packet stream shoud generate NSs out of pg1 # ns_pg1.remove_vpp_config() @@ -1347,10 +1347,10 @@ class TestIP6LoadBalance(VppTestCase): route_3000_1 = VppIpRoute(self, "3000::1", 128, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) 
route_3000_1.add_vpp_config() @@ -1367,11 +1367,11 @@ class TestIP6LoadBalance(VppTestCase): [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, labels=[67], - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, labels=[67], - is_ip6=1)]) + proto=DpoProto.DPO_PROTO_IP6)]) route_67.add_vpp_config() # @@ -1441,20 +1441,20 @@ class TestIP6LoadBalance(VppTestCase): route_3000_2 = VppIpRoute(self, "3000::2", 128, [VppRoutePath(self.pg3.remote_ip6, self.pg3.sw_if_index, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg4.remote_ip6, self.pg4.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_3000_2.add_vpp_config() route_4000_1 = VppIpRoute(self, "4000::1", 128, [VppRoutePath("3000::1", 0xffffffff, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath("3000::2", 0xffffffff, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_4000_1.add_vpp_config() @@ -1485,14 +1485,14 @@ class TestIP6LoadBalance(VppTestCase): route_5000_2 = VppIpRoute(self, "5000::2", 128, [VppRoutePath(self.pg3.remote_ip6, self.pg3.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_5000_2.add_vpp_config() route_6000_1 = VppIpRoute(self, "6000::1", 128, [VppRoutePath("5000::2", 0xffffffff, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_6000_1.add_vpp_config() diff --git a/test/test_map.py b/test/test_map.py index 9ac3948a..bbf4aec2 100644 --- a/test/test_map.py +++ b/test/test_map.py @@ -4,7 +4,7 @@ import unittest import socket from framework import VppTestCase, VppTestRunner -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from scapy.layers.l2 import Ether, Raw from scapy.layers.inet import IP, UDP, ICMP @@ -75,7 +75,7 @@ class TestMAP(VppTestCase): map_br_pfx_len, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) map_route.add_vpp_config() @@ -138,13 +138,12 @@ class TestMAP(VppTestCase): # Add a route to 4001::1. 
Expect the encapped traffic to be # sent via that routes next-hop # - pre_res_route = VppIpRoute(self, - "4001::1", - 128, - [VppRoutePath(self.pg1.remote_hosts[2].ip6, - self.pg1.sw_if_index, - is_ip6=1)], - is_ip6=1) + pre_res_route = VppIpRoute( + self, "4001::1", 128, + [VppRoutePath(self.pg1.remote_hosts[2].ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) pre_res_route.add_vpp_config() self.send_and_assert_encapped(v4, map_src, @@ -156,7 +155,7 @@ class TestMAP(VppTestCase): # pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6, self.pg1.sw_if_index, - is_ip6=1)]) + proto=DpoProto.DPO_PROTO_IP6)]) pre_res_route.add_vpp_config() self.send_and_assert_encapped(v4, map_src, diff --git a/test/test_mpls.py b/test/test_mpls.py index e3d013af..b2226a74 100644 --- a/test/test_mpls.py +++ b/test/test_mpls.py @@ -6,7 +6,7 @@ import socket from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \ VppMplsIpBind, VppIpMRoute, VppMRoutePath, \ - MRouteItfFlags, MRouteEntryFlags + MRouteItfFlags, MRouteEntryFlags, DpoProto from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface from scapy.packet import Raw @@ -16,6 +16,38 @@ from scapy.layers.inet6 import IPv6 from scapy.contrib.mpls import MPLS +def verify_filter(capture, sent): + if not len(capture) == len(sent): + # filter out any IPv6 RAs from the capture + for p in capture: + if p.haslayer(IPv6): + capture.remove(p) + return capture + + +def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0): + # the rx'd packet has the MPLS label popped + eth = rx[Ether] + tst.assertEqual(eth.type, 0x8847) + + rx_mpls = rx[MPLS] + + for ii in range(len(mpls_labels)): + tst.assertEqual(rx_mpls.label, mpls_labels[ii]) + tst.assertEqual(rx_mpls.cos, 0) + if ii == num: + tst.assertEqual(rx_mpls.ttl, ttl) + else: + tst.assertEqual(rx_mpls.ttl, 255) + if ii == len(mpls_labels) - 1: + tst.assertEqual(rx_mpls.s, 1) + else: + # not end of stack + tst.assertEqual(rx_mpls.s, 0) + # pop the label to expose the next + rx_mpls = rx_mpls[MPLS].payload + + class TestMPLS(VppTestCase): """ MPLS Test Case """ @@ -120,18 +152,9 @@ class TestMPLS(VppTestCase): pkts.append(p) return pkts - @staticmethod - def verify_filter(capture, sent): - if not len(capture) == len(sent): - # filter out any IPv6 RAs from the capture - for p in capture: - if p.haslayer(IPv6): - capture.remove(p) - return capture - def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -158,33 +181,10 @@ class TestMPLS(VppTestCase): except: raise - def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0): - # the rx'd packet has the MPLS label popped - eth = rx[Ether] - self.assertEqual(eth.type, 0x8847) - - rx_mpls = rx[MPLS] - - for ii in range(len(mpls_labels)): - self.assertEqual(rx_mpls.label, mpls_labels[ii]) - self.assertEqual(rx_mpls.cos, 0) - if ii == num: - self.assertEqual(rx_mpls.ttl, ttl) - else: - self.assertEqual(rx_mpls.ttl, 255) - - if ii == len(mpls_labels) - 1: - self.assertEqual(rx_mpls.s, 1) - else: - # not end of stack - self.assertEqual(rx_mpls.s, 0) - # pop the label to expose the next - rx_mpls = rx_mpls[MPLS].payload - def verify_capture_labelled_ip4(self, src_if, capture, sent, mpls_labels): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -195,8 
+195,8 @@ class TestMPLS(VppTestCase): rx_ip = rx[IP] # the MPLS TTL is copied from the IP - self.verify_mpls_stack( - rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1) + verify_mpls_stack(self, rx, mpls_labels, rx_ip.ttl, + len(mpls_labels) - 1) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) @@ -211,7 +211,7 @@ class TestMPLS(VppTestCase): if top is None: top = len(mpls_labels) - 1 try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -222,8 +222,7 @@ class TestMPLS(VppTestCase): rx_ip = rx[IP] # the MPLS TTL is 255 since it enters a new tunnel - self.verify_mpls_stack( - rx, mpls_labels, ttl, top) + verify_mpls_stack(self, rx, mpls_labels, ttl, top) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) @@ -236,13 +235,13 @@ class TestMPLS(VppTestCase): def verify_capture_labelled(self, src_if, capture, sent, mpls_labels, ttl=254, num=0): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): rx = capture[i] - self.verify_mpls_stack(rx, mpls_labels, ttl, num) + verify_mpls_stack(self, rx, mpls_labels, ttl, num) except: raise @@ -1049,7 +1048,7 @@ class TestMPLS(VppTestCase): self.pg1.sw_if_index, nh_table_id=1, rpf_id=55, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_multicast=1) route_34_eos.add_vpp_config() @@ -1440,19 +1439,20 @@ class TestMPLSPIC(VppTestCase): for ii in range(64): dst = "3000::%d" % ii local_label = 1600 + ii - vpn_routes.append(VppIpRoute(self, dst, 128, - [VppRoutePath(self.pg2.remote_ip6, - 0xffffffff, - nh_table_id=1, - is_resolve_attached=1, - is_ip6=1), - VppRoutePath(self.pg3.remote_ip6, - 0xffffffff, - nh_table_id=1, - is_ip6=1, - is_resolve_attached=1)], - table_id=1, - is_ip6=1)) + vpn_routes.append(VppIpRoute( + self, dst, 128, + [VppRoutePath(self.pg2.remote_ip6, + 0xffffffff, + nh_table_id=1, + is_resolve_attached=1, + proto=DpoProto.DPO_PROTO_IP6), + VppRoutePath(self.pg3.remote_ip6, + 0xffffffff, + nh_table_id=1, + proto=DpoProto.DPO_PROTO_IP6, + is_resolve_attached=1)], + table_id=1, + is_ip6=1)) vpn_routes[ii].add_vpp_config() vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128, @@ -1525,5 +1525,211 @@ class TestMPLSPIC(VppTestCase): self.assertNotEqual(0, len(rx1)) +class TestMPLSL2(VppTestCase): + """ MPLS-L2 """ + + def setUp(self): + super(TestMPLSL2, self).setUp() + + # create 2 pg interfaces + self.create_pg_interfaces(range(2)) + + # use pg0 as the core facing interface + self.pg0.admin_up() + self.pg0.config_ip4() + self.pg0.resolve_arp() + self.pg0.enable_mpls() + + # use the other 2 for customer facg L2 links + for i in self.pg_interfaces[1:]: + i.admin_up() + + def tearDown(self): + super(TestMPLSL2, self).tearDown() + for i in self.pg_interfaces[1:]: + i.admin_down() + + self.pg0.disable_mpls() + self.pg0.unconfig_ip4() + self.pg0.admin_down() + + def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels, + ttl=255, top=None): + if top is None: + top = len(mpls_labels) - 1 + + capture = verify_filter(capture, sent) + + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + tx = sent[i] + rx = capture[i] + + # the MPLS TTL is 255 since it enters a new tunnel + verify_mpls_stack(self, rx, mpls_labels, ttl, top) + + tx_eth = tx[Ether] + rx_eth = Ether(str(rx[MPLS].payload)) + + self.assertEqual(rx_eth.src, tx_eth.src) + self.assertEqual(rx_eth.dst, 
tx_eth.dst) + + def test_vpws(self): + """ Virtual Private Wire Service """ + + # + # Create an MPLS tunnel that pushes 1 label + # + mpls_tun_1 = VppMPLSTunnelInterface(self, + [VppRoutePath(self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[42])], + is_l2=1) + mpls_tun_1.add_vpp_config() + mpls_tun_1.admin_up() + + # + # Create a label entry to for 55 that does L2 input to the tunnel + # + route_55_eos = VppMplsRoute( + self, 55, 1, + [VppRoutePath("0.0.0.0", + mpls_tun_1.sw_if_index, + is_interface_rx=1, + proto=DpoProto.DPO_PROTO_ETHERNET)]) + route_55_eos.add_vpp_config() + + # + # Cross-connect the tunnel with one of the customers L2 interfaces + # + self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index, + mpls_tun_1.sw_if_index, + enable=1) + self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index, + self.pg1.sw_if_index, + enable=1) + + # + # inject a packet from the core + # + pcore = (Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=55, ttl=64) / + Ether(dst="00:00:de:ad:ba:be", + src="00:00:de:ad:be:ef") / + IP(src="10.10.10.10", dst="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + self.pg0.add_stream(pcore * 65) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg1.get_capture(65) + tx = pcore[MPLS].payload + + self.assertEqual(rx0[0][Ether].dst, tx[Ether].dst) + self.assertEqual(rx0[0][Ether].src, tx[Ether].src) + + # + # Inject a packet from the custoer/L2 side + # + self.pg1.add_stream(tx * 65) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0.get_capture(65) + + self.verify_capture_tunneled_ethernet(rx0, tx*65, [42]) + + def test_vpls(self): + """ Virtual Private LAN Service """ + # + # Create an L2 MPLS tunnel + # + mpls_tun = VppMPLSTunnelInterface(self, + [VppRoutePath(self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[42])], + is_l2=1) + mpls_tun.add_vpp_config() + mpls_tun.admin_up() + + # + # Create a label entry to for 55 that does L2 input to the tunnel + # + route_55_eos = VppMplsRoute( + self, 55, 1, + [VppRoutePath("0.0.0.0", + mpls_tun.sw_if_index, + is_interface_rx=1, + proto=DpoProto.DPO_PROTO_ETHERNET)]) + route_55_eos.add_vpp_config() + + # + # add to tunnel to the customers bridge-domain + # + self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index, + bd_id=1) + self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index, + bd_id=1) + + # + # Packet from the customer interface and from the core + # + p_cust = (Ether(dst="00:00:de:ad:ba:be", + src="00:00:de:ad:be:ef") / + IP(src="10.10.10.10", dst="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + p_core = (Ether(src="00:00:de:ad:ba:be", + dst="00:00:de:ad:be:ef") / + IP(dst="10.10.10.10", src="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + # + # The BD is learning, so send in one of each packet to learn + # + p_core_encap = (Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=55, ttl=64) / + p_core) + + self.pg1.add_stream(p_cust) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + self.pg0.add_stream(p_core_encap) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + # we've learnt this so expect it be be forwarded + rx0 = self.pg1.get_capture(1) + + self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst) + self.assertEqual(rx0[0][Ether].src, p_core[Ether].src) + + # + # now a stream in each direction + # + self.pg1.add_stream(p_cust * 65) + self.pg_enable_capture(self.pg_interfaces) + 
self.pg_start() + + rx0 = self.pg0.get_capture(65) + + self.verify_capture_tunneled_ethernet(rx0, p_cust*65, [42]) + + # + # remove interfaces from customers bridge-domain + # + self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index, + bd_id=1, + enable=0) + self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index, + bd_id=1, + enable=0) + if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/test_p2p_ethernet.py b/test/test_p2p_ethernet.py index 37a1d18b..8688f7e6 100644 --- a/test/test_p2p_ethernet.py +++ b/test/test_p2p_ethernet.py @@ -11,7 +11,7 @@ from scapy.layers.inet6 import IPv6 from framework import VppTestCase, VppTestRunner, running_extended_tests from vpp_sub_interface import VppP2PSubint -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from util import mactobinary @@ -219,7 +219,7 @@ class P2PEthernetIPV6(VppTestCase): route_8000 = VppIpRoute(self, "8000::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8000.add_vpp_config() @@ -239,7 +239,7 @@ class P2PEthernetIPV6(VppTestCase): route_9001 = VppIpRoute(self, "9001::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_9001.add_vpp_config() @@ -264,7 +264,7 @@ class P2PEthernetIPV6(VppTestCase): route_3 = VppIpRoute(self, "9000::", 64, [VppRoutePath(self.pg1._remote_hosts[0].ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_3.add_vpp_config() @@ -289,7 +289,7 @@ class P2PEthernetIPV6(VppTestCase): route_9001 = VppIpRoute(self, "9000::", 64, [VppRoutePath(self.pg1._remote_hosts[0].ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_9001.add_vpp_config() @@ -310,19 +310,19 @@ class P2PEthernetIPV6(VppTestCase): route_8000 = VppIpRoute(self, "8000::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8000.add_vpp_config() route_8001 = VppIpRoute(self, "8001::", 64, [VppRoutePath(self.p2p_sub_ifs[0].remote_ip6, self.p2p_sub_ifs[0].sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8001.add_vpp_config() route_8002 = VppIpRoute(self, "8002::", 64, [VppRoutePath(self.p2p_sub_ifs[1].remote_ip6, self.p2p_sub_ifs[1].sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8002.add_vpp_config() diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py index badb3102..2c489e3c 100644 --- a/test/vpp_ip_route.py +++ b/test/vpp_ip_route.py @@ -29,6 +29,14 @@ class MRouteEntryFlags: MFIB_ENTRY_FLAG_INHERIT_ACCEPT = 8 +class DpoProto: + DPO_PROTO_IP4 = 0 + DPO_PROTO_IP6 = 1 + DPO_PROTO_MPLS = 2 + DPO_PROTO_ETHERNET = 3 + DPO_PROTO_NSH = 4 + + def find_route(test, ip_addr, len, table_id=0, inet=AF_INET): if inet == AF_INET: s = 4 @@ -55,22 +63,24 @@ class VppRoutePath(object): nh_table_id=0, labels=[], nh_via_label=MPLS_LABEL_INVALID, - is_ip6=0, rpf_id=0, is_interface_rx=0, is_resolve_host=0, - is_resolve_attached=0): + is_resolve_attached=0, + proto=DpoProto.DPO_PROTO_IP4): self.nh_itf = nh_sw_if_index self.nh_table_id = nh_table_id self.nh_via_label = nh_via_label self.nh_labels = labels self.weight = 1 self.rpf_id = rpf_id - self.is_ip4 = 1 if is_ip6 == 0 else 0 - if self.is_ip4: + self.proto = proto + if self.proto is DpoProto.DPO_PROTO_IP6: + self.nh_addr = inet_pton(AF_INET6, nh_addr) + elif 
self.proto is DpoProto.DPO_PROTO_IP4:
             self.nh_addr = inet_pton(AF_INET, nh_addr)
         else:
-            self.nh_addr = inet_pton(AF_INET6, nh_addr)
+            self.nh_addr = inet_pton(AF_INET6, "::")
         self.is_resolve_host = is_resolve_host
         self.is_resolve_attached = is_resolve_attached
         self.is_interface_rx = is_interface_rx
@@ -401,7 +411,7 @@ class VppMplsRoute(VppObject):
             self._test.vapi.mpls_route_add_del(
                 self.local_label,
                 self.eos_bit,
-                path.is_ip4,
+                path.proto,
                 path.nh_addr,
                 path.nh_itf,
                 is_multicast=self.is_multicast,
@@ -420,7 +430,7 @@ class VppMplsRoute(VppObject):
         for path in self.paths:
             self._test.vapi.mpls_route_add_del(self.local_label,
                                                self.eos_bit,
-                                               1,
+                                               path.proto,
                                                path.nh_addr,
                                                path.nh_itf,
                                                is_rpf_id=path.is_rpf_id,
diff --git a/test/vpp_mpls_tunnel_interface.py b/test/vpp_mpls_tunnel_interface.py
index f2001574..0542b05c 100644
--- a/test/vpp_mpls_tunnel_interface.py
+++ b/test/vpp_mpls_tunnel_interface.py
@@ -9,13 +9,14 @@ class VppMPLSTunnelInterface(VppInterface):
     VPP MPLS Tunnel interface
     """
 
-    def __init__(self, test, paths, is_multicast=0):
+    def __init__(self, test, paths, is_multicast=0, is_l2=0):
         """ Create MPLS Tunnel interface """
         self._sw_if_index = 0
         super(VppMPLSTunnelInterface, self).__init__(test)
         self._test = test
         self.t_paths = paths
         self.is_multicast = is_multicast
+        self.is_l2 = is_l2
 
     def add_vpp_config(self):
         self._sw_if_index = 0xffffffff
@@ -29,7 +30,8 @@ class VppMPLSTunnelInterface(VppInterface):
                 path.weight,
                 next_hop_out_label_stack=path.nh_labels,
                 next_hop_n_out_labels=len(path.nh_labels),
-                is_multicast=self.is_multicast)
+                is_multicast=self.is_multicast,
+                l2_only=self.is_l2)
             self._sw_if_index = reply.sw_if_index
 
     def remove_vpp_config(self):
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 801a6c2d..3ba2ad4a 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -921,7 +921,7 @@ class VppPapiProvider(object):
             self,
             label,
             eos,
-            next_hop_proto_is_ip4,
+            next_hop_proto,
             next_hop_address,
             next_hop_sw_if_index=0xFFFFFFFF,
             table_id=0,
@@ -982,7 +982,7 @@
              'mr_is_resolve_attached': is_resolve_attached,
              'mr_is_interface_rx': is_interface_rx,
              'mr_is_rpf_id': is_rpf_id,
-             'mr_next_hop_proto_is_ip4': next_hop_proto_is_ip4,
+             'mr_next_hop_proto': next_hop_proto,
              'mr_next_hop_weight': next_hop_weight,
              'mr_next_hop': next_hop_address,
              'mr_next_hop_n_out_labels': next_hop_n_out_labels,
--
cgit 1.2.3-korg
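
The test-facing half of this change replaces the boolean is_ip6/is_ip4 path flags with an explicit DpoProto value. The fragment below is a minimal usage sketch, not part of the patch: it assumes the VppIpRoute, VppRoutePath and DpoProto classes behave as shown in the test/vpp_ip_route.py hunks above, that "test" is a VppTestCase whose pg0 interface has IPv6 configured, and the 2001:db8::/64 prefix is only an example value.

# Illustrative sketch only, assuming the post-patch test API from
# test/vpp_ip_route.py; add_example_ip6_route and the prefix are hypothetical.
from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto


def add_example_ip6_route(test):
    # An IPv6 route via pg0's neighbour: proto= replaces the old is_ip6=1
    # argument on the path, while the route object keeps its is_ip6 flag.
    route = VppIpRoute(test, "2001:db8::", 64,
                       [VppRoutePath(test.pg0.remote_ip6,
                                     test.pg0.sw_if_index,
                                     proto=DpoProto.DPO_PROTO_IP6)],
                       is_ip6=1)
    route.add_vpp_config()
    return route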