summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNeale Ranns <nranns@cisco.com>2018-02-23 05:29:09 -0800
committerDamjan Marion <dmarion.lists@gmail.com>2018-03-09 11:59:58 +0000
commit31ed74407643595fdce206e9d7487108fb8b33ab (patch)
treec22c3703c30b7d457b858fe899f56e57613cbb52
parent8f931a47b0fa58d5d33a792062650a42ff8bef70 (diff)
MPLS Uniform mode
- support both pipe and uniform modes for all MPLS LSP - all API programming for output-labels requires that the mode (and associated data) is specified - API changes in MPLS, BIER and IP are involved - new DPO [sub] types for MPLS labels to handle the two modes. Change-Id: I87b76401e996f10dfbdbe4552ff6b19af958783c Signed-off-by: Neale Ranns <nranns@cisco.com>
-rw-r--r--src/vat/api_format.c16
-rw-r--r--src/vnet/adj/adj_l2.c5
-rw-r--r--src/vnet/bier/bier.api10
-rw-r--r--src/vnet/bier/bier_api.c20
-rw-r--r--src/vnet/bier/bier_fmask.c9
-rw-r--r--src/vnet/bier/bier_output.c4
-rw-r--r--src/vnet/bier/bier_test.c30
-rw-r--r--src/vnet/buffer.h2
-rw-r--r--src/vnet/dpo/dpo.h8
-rw-r--r--src/vnet/dpo/mpls_disposition.c316
-rw-r--r--src/vnet/dpo/mpls_disposition.h15
-rw-r--r--src/vnet/dpo/mpls_label_dpo.c1075
-rw-r--r--src/vnet/dpo/mpls_label_dpo.h65
-rw-r--r--src/vnet/fib/fib_api.h2
-rw-r--r--src/vnet/fib/fib_entry_src.c4
-rw-r--r--src/vnet/fib/fib_path.c20
-rw-r--r--src/vnet/fib/fib_path.h1
-rw-r--r--src/vnet/fib/fib_path_ext.c77
-rw-r--r--src/vnet/fib/fib_path_ext.h34
-rw-r--r--src/vnet/fib/fib_table.c4
-rw-r--r--src/vnet/fib/fib_table.h4
-rw-r--r--src/vnet/fib/fib_test.c205
-rw-r--r--src/vnet/fib/fib_test.h5
-rw-r--r--src/vnet/fib/fib_types.api18
-rw-r--r--src/vnet/fib/fib_types.c53
-rw-r--r--src/vnet/fib/fib_types.h67
-rw-r--r--src/vnet/ip/ip.api32
-rw-r--r--src/vnet/ip/ip6_packet.h18
-rw-r--r--src/vnet/ip/ip_api.c30
-rw-r--r--src/vnet/mfib/mfib_test.c6
-rw-r--r--src/vnet/mpls/mpls.api54
-rw-r--r--src/vnet/mpls/mpls.c3
-rw-r--r--src/vnet/mpls/mpls_api.c34
-rw-r--r--src/vnet/mpls/mpls_input.c35
-rw-r--r--src/vnet/mpls/mpls_tunnel.c16
-rw-r--r--src/vnet/mpls/mpls_types.h8
-rw-r--r--src/vnet/mpls/packet.h26
-rwxr-xr-xsrc/vnet/srmpls/sr_mpls_policy.c9
-rwxr-xr-xsrc/vnet/srmpls/sr_mpls_steering.c12
-rw-r--r--src/vnet/vxlan/vxlan.api1
-rw-r--r--test/test_bier.py29
-rw-r--r--test/test_mpls.py841
-rw-r--r--test/test_udp.py12
-rw-r--r--test/vpp_bier.py29
-rw-r--r--test/vpp_ip_route.py44
-rw-r--r--test/vpp_mpls_tunnel_interface.py8
-rw-r--r--test/vpp_papi_provider.py21
47 files changed, 2324 insertions, 1013 deletions
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index dc7111d4ea7..776f3500675 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -8872,7 +8872,7 @@ api_bier_route_add_del (vat_main_t * vam)
}
/* Construct the API message */
- M2 (BIER_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_path3_t));
+ M2 (BIER_ROUTE_ADD_DEL, mp, sizeof (vl_api_fib_path_t));
mp->br_is_add = is_add;
mp->br_tbl_id.bt_set = set;
@@ -8881,7 +8881,7 @@ api_bier_route_add_del (vat_main_t * vam)
mp->br_bp = ntohs (bp);
mp->br_n_paths = 1;
mp->br_paths[0].n_labels = 1;
- mp->br_paths[0].label_stack[0] = ntohl (next_hop_out_label);
+ mp->br_paths[0].label_stack[0].label = ntohl (next_hop_out_label);
mp->br_paths[0].afi = (next_hop_proto_is_ip4 ? 0 : 1);
if (next_hop_proto_is_ip4)
@@ -19634,7 +19634,7 @@ api_netmap_delete (vat_main_t * vam)
}
static void
-vl_api_mpls_fib_path_print (vat_main_t * vam, vl_api_fib_path2_t * fp)
+vl_api_mpls_fib_path_print (vat_main_t * vam, vl_api_fib_path_t * fp)
{
if (fp->afi == IP46_TYPE_IP6)
print (vam->ofp,
@@ -19654,7 +19654,7 @@ vl_api_mpls_fib_path_print (vat_main_t * vam, vl_api_fib_path2_t * fp)
static void
vl_api_mpls_fib_path_json_print (vat_json_node_t * node,
- vl_api_fib_path2_t * fp)
+ vl_api_fib_path_t * fp)
{
struct in_addr ip4;
struct in6_addr ip6;
@@ -19683,7 +19683,7 @@ vl_api_mpls_tunnel_details_t_handler (vl_api_mpls_tunnel_details_t * mp)
{
vat_main_t *vam = &vat_main;
int count = ntohl (mp->mt_count);
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
i32 i;
print (vam->ofp, "[%d]: sw_if_index %d via:",
@@ -19707,7 +19707,7 @@ vl_api_mpls_tunnel_details_t_handler_json (vl_api_mpls_tunnel_details_t * mp)
vat_main_t *vam = &vat_main;
vat_json_node_t *node = NULL;
int count = ntohl (mp->mt_count);
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
i32 i;
if (VAT_JSON_ARRAY != vam->json_tree.type)
@@ -19773,7 +19773,7 @@ vl_api_mpls_fib_details_t_handler (vl_api_mpls_fib_details_t * mp)
{
vat_main_t *vam = &vat_main;
int count = ntohl (mp->count);
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
int i;
print (vam->ofp,
@@ -19793,7 +19793,7 @@ static void vl_api_mpls_fib_details_t_handler_json
vat_main_t *vam = &vat_main;
int count = ntohl (mp->count);
vat_json_node_t *node = NULL;
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
int i;
if (VAT_JSON_ARRAY != vam->json_tree.type)
diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c
index 132ceb28168..09bf468ac40 100644
--- a/src/vnet/adj/adj_l2.c
+++ b/src/vnet/adj/adj_l2.c
@@ -91,6 +91,11 @@ adj_l2_rewrite_inline (vlib_main_t * vm,
rw_len0 = adj0[0].rewrite_header.data_bytes;
vnet_buffer(p0)->ip.save_rewrite_length = rw_len0;
vnet_buffer(p0)->sw_if_index[VLIB_TX] = adj0->rewrite_header.sw_if_index;
+ /* since we are coming out of the L2 world, where the vlib_buffer
+ * union is used for other things, make sure it is clean for
+ * MPLS from now on.
+ */
+ vnet_buffer(p0)->mpls.first = 0;
vlib_increment_combined_counter(&adjacency_counters,
thread_index,
diff --git a/src/vnet/bier/bier.api b/src/vnet/bier/bier.api
index 446863c8ee8..d07379e92f0 100644
--- a/src/vnet/bier/bier.api
+++ b/src/vnet/bier/bier.api
@@ -18,7 +18,7 @@
This file defines vpp BIER control-plane API messages which are generally
called through a shared memory interface.
*/
-option version = "1.0.0";
+option version = "1.1.0";
import "vnet/fib/fib_types.api";
/** \brief BIER Table Indentifier
@@ -84,7 +84,7 @@ autoreply define bier_route_add_del
u8 br_is_replace;
vl_api_bier_table_id_t br_tbl_id;
u8 br_n_paths;
- vl_api_fib_path3_t br_paths[br_n_paths];
+ vl_api_fib_path_t br_paths[br_n_paths];
};
define bier_route_dump
@@ -101,7 +101,7 @@ define bier_route_details
u16 br_bp;
vl_api_bier_table_id_t br_tbl_id;
u32 br_n_paths;
- vl_api_fib_path3_t br_paths[br_n_paths];
+ vl_api_fib_path_t br_paths[br_n_paths];
};
/** \brief BIER Imposition Add
@@ -211,7 +211,7 @@ autoreply define bier_disp_entry_add_del
u8 bde_is_add;
u8 bde_payload_proto;
u8 bde_n_paths;
- vl_api_fib_path3_t bde_paths[bde_n_paths];
+ vl_api_fib_path_t bde_paths[bde_n_paths];
};
define bier_disp_entry_dump
@@ -229,7 +229,7 @@ define bier_disp_entry_details
u8 bde_is_add;
u8 bde_payload_proto;
u8 bde_n_paths;
- vl_api_fib_path3_t bde_paths[bde_n_paths];
+ vl_api_fib_path_t bde_paths[bde_n_paths];
};
/*
diff --git a/src/vnet/bier/bier_api.c b/src/vnet/bier/bier_api.c
index 4b1d1c28781..77b2cabaa44 100644
--- a/src/vnet/bier/bier_api.c
+++ b/src/vnet/bier/bier_api.c
@@ -202,8 +202,16 @@ vl_api_bier_route_add_del_t_handler (vl_api_bier_route_add_del_t * mp)
mp->br_paths[ii].n_labels - 1);
for (jj = 0; jj < mp->br_paths[ii].n_labels; jj++)
{
- brpath->frp_label_stack[jj] =
- ntohl(mp->br_paths[ii].label_stack[jj]);
+ brpath->frp_label_stack[jj].fml_value =
+ ntohl(mp->br_paths[ii].label_stack[jj].label);
+ brpath->frp_label_stack[jj].fml_ttl =
+ mp->br_paths[ii].label_stack[jj].ttl;
+ brpath->frp_label_stack[jj].fml_exp =
+ mp->br_paths[ii].label_stack[jj].exp;
+ brpath->frp_label_stack[jj].fml_mode =
+ (mp->br_paths[ii].label_stack[jj].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM :
+ FIB_MPLS_LSP_MODE_PIPE);
}
if (mp->br_paths[ii].is_udp_encap)
@@ -275,11 +283,11 @@ send_bier_route_details (const bier_table_t *bt,
fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;
bier_route_details_walk_t *ctx = args;
vl_api_bier_route_details_t *mp;
- vl_api_fib_path3_t *fp;
+ vl_api_fib_path_t *fp;
u32 n_paths, m_size;
n_paths = fib_path_list_get_n_paths(be->be_path_list);
- m_size = sizeof(*mp) + (n_paths * sizeof(vl_api_fib_path3_t));
+ m_size = sizeof(*mp) + (n_paths * sizeof(vl_api_fib_path_t));
mp = vl_msg_api_alloc(m_size);
if (!mp)
return;
@@ -636,7 +644,7 @@ send_bier_disp_entry_details (const bier_disp_table_t *bdt,
bier_disp_entry_details_walk_t *ctx = args;
vl_api_bier_disp_entry_details_t *mp;
bier_hdr_proto_id_t pproto;
- vl_api_fib_path3_t *fp;
+ vl_api_fib_path_t *fp;
u32 n_paths, m_size;
FOR_EACH_BIER_HDR_PROTO(pproto)
@@ -645,7 +653,7 @@ send_bier_disp_entry_details (const bier_disp_table_t *bdt,
if (INDEX_INVALID != pl)
{
n_paths = fib_path_list_get_n_paths(pl);
- m_size = sizeof(*mp) + (n_paths * sizeof(vl_api_fib_path3_t));
+ m_size = sizeof(*mp) + (n_paths * sizeof(vl_api_fib_path_t));
mp = vl_msg_api_alloc(m_size);
if (!mp)
return;
diff --git a/src/vnet/bier/bier_fmask.c b/src/vnet/bier/bier_fmask.c
index 2fc24dca819..31d884af060 100644
--- a/src/vnet/bier/bier_fmask.c
+++ b/src/vnet/bier/bier_fmask.c
@@ -177,16 +177,13 @@ bier_fmask_init (bier_fmask_t *bfm,
if (!(bfm->bfm_flags & BIER_FMASK_FLAG_DISP))
{
- /*
- * leave this label in host byte order so we can OR in the TTL
- */
if (NULL != rpaths->frp_label_stack)
{
- olabel = rpaths->frp_label_stack[0];
+ olabel = rpaths->frp_label_stack[0].fml_value;
vnet_mpls_uc_set_label(&bfm->bfm_label, olabel);
vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
- vnet_mpls_uc_set_ttl(&bfm->bfm_label, 0);
+ vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
bfm->bfm_flags |= BIER_FMASK_FLAG_MPLS;
}
else
@@ -207,7 +204,9 @@ bier_fmask_init (bier_fmask_t *bfm,
vnet_mpls_uc_set_label(&bfm->bfm_label, id);
vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
+ vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
}
+ bfm->bfm_label = clib_host_to_net_u32(bfm->bfm_label);
}
bfm->bfm_pl = fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED |
diff --git a/src/vnet/bier/bier_output.c b/src/vnet/bier/bier_output.c
index db115d3ad5e..01eeffe475c 100644
--- a/src/vnet/bier/bier_output.c
+++ b/src/vnet/bier/bier_output.c
@@ -140,8 +140,8 @@ bier_output (vlib_main_t * vm,
h0 = vlib_buffer_get_current(b0);
h0[0] = bfm0->bfm_label;
- vnet_mpls_uc_set_ttl(h0, vnet_buffer(b0)->mpls.ttl - 1);
- h0[0] = clib_host_to_net_u32(h0[0]);
+
+ ((char*)h0)[3]= vnet_buffer(b0)->mpls.ttl - 1;
}
/*
diff --git a/src/vnet/bier/bier_test.c b/src/vnet/bier/bier_test.c
index 08a8c55954d..6c7af829811 100644
--- a/src/vnet/bier/bier_test.c
+++ b/src/vnet/bier/bier_test.c
@@ -316,7 +316,10 @@ bier_test_mpls_spf (void)
.frp_bier_fib_index = bti,
.frp_sw_if_index = ~0,
};
- vec_add1(path_1_1_1_1.frp_label_stack, 500);
+ fib_mpls_label_t fml_500 = {
+ .fml_value = 500,
+ };
+ vec_add1(path_1_1_1_1.frp_label_stack, fml_500);
vec_add1(paths_1_1_1_1, path_1_1_1_1);
const fib_prefix_t pfx_1_1_1_1_s_32 = {
.fp_addr = nh_1_1_1_1,
@@ -389,8 +392,10 @@ bier_test_mpls_spf (void)
.ttl = 255,
},
};
- mpls_label_t *out_lbl_99 = NULL;
- vec_add1(out_lbl_99, 99);
+ fib_mpls_label_t *out_lbl_99 = NULL, fml_99 = {
+ .fml_value = 99,
+ };
+ vec_add1(out_lbl_99, fml_99);
fei = fib_table_entry_update_one_path(0,
&pfx_1_1_1_1_s_32,
@@ -443,8 +448,10 @@ bier_test_mpls_spf (void)
.ttl = 255,
},
};
- mpls_label_t *out_lbl_100 = NULL;
- vec_add1(out_lbl_100, 100);
+ fib_mpls_label_t *out_lbl_100 = NULL, fml_100 = {
+ .fml_value = 100,
+ };
+ vec_add1(out_lbl_100, fml_100);
fei = fib_table_entry_path_add(0,
&pfx_1_1_1_1_s_32,
@@ -505,13 +512,18 @@ bier_test_mpls_spf (void)
.frp_bier_fib_index = bti,
.frp_sw_if_index = ~0,
};
- vec_add1(path_1_1_1_2.frp_label_stack, 501);
+ fib_mpls_label_t fml_501 = {
+ .fml_value = 501,
+ };
+ vec_add1(path_1_1_1_2.frp_label_stack, fml_501);
vec_add1(paths_1_1_1_2, path_1_1_1_2);
input_paths_1_1_1_2 = vec_dup(paths_1_1_1_2);
index_t bei_3;
- mpls_label_t *out_lbl_101 = NULL;
- vec_add1(out_lbl_101, 101);
+ fib_mpls_label_t *out_lbl_101 = NULL, fml_101 = {
+ .fml_value = 101,
+ };
+ vec_add1(out_lbl_101, fml_101);
fei = fib_table_entry_path_add(0,
&pfx_1_1_1_2_s_32,
FIB_SOURCE_API,
@@ -605,7 +617,7 @@ bier_test_mpls_spf (void)
* add the via back
*/
out_lbl_101 = NULL;
- vec_add1(out_lbl_101, 101);
+ vec_add1(out_lbl_101, fml_101);
fei = fib_table_entry_path_add(0,
&pfx_1_1_1_2_s_32,
FIB_SOURCE_API,
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index 5aedb431654..7a4bc245a30 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -181,6 +181,8 @@ typedef struct
*/
struct
{
+ /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
+ u32 pad[VLIB_N_RX_TX + 1];
u8 ttl;
u8 exp;
u8 first;
diff --git a/src/vnet/dpo/dpo.h b/src/vnet/dpo/dpo.h
index afee458c02f..4d484786fba 100644
--- a/src/vnet/dpo/dpo.h
+++ b/src/vnet/dpo/dpo.h
@@ -111,8 +111,8 @@ typedef enum dpo_type_t_ {
DPO_LOOKUP,
DPO_LISP_CP,
DPO_CLASSIFY,
- DPO_MPLS_LABEL,
- DPO_MPLS_DISPOSITION,
+ DPO_MPLS_DISPOSITION_PIPE,
+ DPO_MPLS_DISPOSITION_UNIFORM,
DPO_MFIB_ENTRY,
DPO_INTERFACE_RX,
DPO_INTERFACE_TX,
@@ -146,8 +146,8 @@ typedef enum dpo_type_t_ {
[DPO_REPLICATE] = "dpo-replicate", \
[DPO_LISP_CP] = "dpo-lisp-cp", \
[DPO_CLASSIFY] = "dpo-classify", \
- [DPO_MPLS_LABEL] = "dpo-mpls-label", \
- [DPO_MPLS_DISPOSITION] = "dpo-mpls-diposition", \
+ [DPO_MPLS_DISPOSITION_PIPE] = "dpo-mpls-diposition-pipe", \
+ [DPO_MPLS_DISPOSITION_UNIFORM] = "dpo-mpls-diposition-uniform", \
[DPO_MFIB_ENTRY] = "dpo-mfib-entry", \
[DPO_INTERFACE_RX] = "dpo-interface-rx", \
[DPO_INTERFACE_TX] = "dpo-interface-tx", \
diff --git a/src/vnet/dpo/mpls_disposition.c b/src/vnet/dpo/mpls_disposition.c
index 77429de4116..2956e541d57 100644
--- a/src/vnet/dpo/mpls_disposition.c
+++ b/src/vnet/dpo/mpls_disposition.c
@@ -42,38 +42,55 @@ mpls_disp_dpo_get_index (mpls_disp_dpo_t *mdd)
return (mdd - mpls_disp_dpo_pool);
}
-index_t
+void
mpls_disp_dpo_create (dpo_proto_t payload_proto,
fib_rpf_id_t rpf_id,
- const dpo_id_t *dpo)
+ fib_mpls_lsp_mode_t mode,
+ const dpo_id_t *parent,
+ dpo_id_t *dpo)
{
mpls_disp_dpo_t *mdd;
+ dpo_type_t dtype;
mdd = mpls_disp_dpo_alloc();
mdd->mdd_payload_proto = payload_proto;
mdd->mdd_rpf_id = rpf_id;
-
- dpo_stack(DPO_MPLS_DISPOSITION,
+ mdd->mdd_mode = mode;
+ dtype = (FIB_MPLS_LSP_MODE_PIPE == mode ?
+ DPO_MPLS_DISPOSITION_PIPE :
+ DPO_MPLS_DISPOSITION_UNIFORM);
+
+ /*
+ * stack this disposition object on the parent given
+ */
+ dpo_stack(dtype,
mdd->mdd_payload_proto,
&mdd->mdd_dpo,
- dpo);
-
- return (mpls_disp_dpo_get_index(mdd));
+ parent);
+
+ /*
+ * set up the return DPO to refer to this object
+ */
+ dpo_set(dpo,
+ dtype,
+ payload_proto,
+ mpls_disp_dpo_get_index(mdd));
}
u8*
format_mpls_disp_dpo (u8 *s, va_list *args)
{
- index_t index = va_arg (*args, index_t);
- u32 indent = va_arg (*args, u32);
+ index_t index = va_arg(*args, index_t);
+ u32 indent = va_arg(*args, u32);
mpls_disp_dpo_t *mdd;
mdd = mpls_disp_dpo_get(index);
- s = format(s, "mpls-disposition:[%d]:[%U]",
+ s = format(s, "mpls-disposition:[%d]:[%U, %U]",
index,
- format_dpo_proto, mdd->mdd_payload_proto);
+ format_dpo_proto, mdd->mdd_payload_proto,
+ format_fib_mpls_lsp_mode, mdd->mdd_mode);
s = format(s, "\n%U", format_white_space, indent);
s = format(s, "%U", format_dpo_id, &mdd->mdd_dpo, indent+2);
@@ -116,25 +133,41 @@ typedef struct mpls_label_disposition_trace_t_
index_t mdd;
} mpls_label_disposition_trace_t;
-extern vlib_node_registration_t ip4_mpls_label_disposition_node;
-extern vlib_node_registration_t ip6_mpls_label_disposition_node;
+extern vlib_node_registration_t ip4_mpls_label_disposition_pipe_node;
+extern vlib_node_registration_t ip6_mpls_label_disposition_pipe_node;
+extern vlib_node_registration_t ip4_mpls_label_disposition_uniform_node;
+extern vlib_node_registration_t ip6_mpls_label_disposition_uniform_node;
always_inline uword
mpls_label_disposition_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame,
- u8 payload_is_ip4,
- u8 payload_is_ip6)
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u8 payload_is_ip4,
+ u8 payload_is_ip6,
+ fib_mpls_lsp_mode_t mode)
{
u32 n_left_from, next_index, * from, * to_next;
vlib_node_runtime_t *error_node;
if (payload_is_ip4)
- error_node = vlib_node_get_runtime (vm, ip4_mpls_label_disposition_node.index);
+ {
+ if (FIB_MPLS_LSP_MODE_PIPE == mode)
+ error_node =
+ vlib_node_get_runtime(vm, ip4_mpls_label_disposition_pipe_node.index);
+ else
+ error_node =
+ vlib_node_get_runtime(vm, ip4_mpls_label_disposition_uniform_node.index);
+ }
else
- error_node = vlib_node_get_runtime (vm, ip6_mpls_label_disposition_node.index);
-
- from = vlib_frame_vector_args (from_frame);
+ {
+ if (FIB_MPLS_LSP_MODE_PIPE == mode)
+ error_node =
+ vlib_node_get_runtime(vm, ip6_mpls_label_disposition_uniform_node.index);
+ else
+ error_node =
+ vlib_node_get_runtime(vm, ip6_mpls_label_disposition_uniform_node.index);
+ }
+ from = vlib_frame_vector_args(from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
@@ -159,14 +192,14 @@ mpls_label_disposition_inline (vlib_main_t * vm,
{
vlib_buffer_t * p2, * p3;
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
+ p2 = vlib_get_buffer(vm, from[2]);
+ p3 = vlib_get_buffer(vm, from[3]);
- vlib_prefetch_buffer_header (p2, STORE);
- vlib_prefetch_buffer_header (p3, STORE);
+ vlib_prefetch_buffer_header(p2, STORE);
+ vlib_prefetch_buffer_header(p3, STORE);
- CLIB_PREFETCH (p2->data, sizeof (ip6_header_t), STORE);
- CLIB_PREFETCH (p3->data, sizeof (ip6_header_t), STORE);
+ CLIB_PREFETCH(p2->data, sizeof(ip6_header_t), STORE);
+ CLIB_PREFETCH(p3->data, sizeof(ip6_header_t), STORE);
}
from += 2;
@@ -174,8 +207,8 @@ mpls_label_disposition_inline (vlib_main_t * vm,
n_left_from -= 2;
n_left_to_next -= 2;
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
+ b0 = vlib_get_buffer(vm, bi0);
+ b1 = vlib_get_buffer(vm, bi1);
/* dst lookup was done by ip4 lookup */
mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
@@ -190,30 +223,62 @@ mpls_label_disposition_inline (vlib_main_t * vm,
{
ip4_header_t *ip0, *ip1;
- ip0 = vlib_buffer_get_current (b0);
- ip1 = vlib_buffer_get_current (b1);
+ ip0 = vlib_buffer_get_current(b0);
+ ip1 = vlib_buffer_get_current(b1);
/*
* IPv4 input checks on the exposed IP header
* including checksum
*/
- ip4_input_check_x2 (vm, error_node,
- b0, b1, ip0, ip1,
- &next0, &next1, 1);
+ ip4_input_check_x2(vm, error_node,
+ b0, b1, ip0, ip1,
+ &next0, &next1, 1);
+
+ if (FIB_MPLS_LSP_MODE_UNIFORM == mode)
+ {
+ /*
+ * Copy the TTL from the MPLS packet into the
+ * exposed IP. recalc the chksum
+ */
+ ip0->ttl = vnet_buffer(b0)->mpls.ttl;
+ ip1->ttl = vnet_buffer(b1)->mpls.ttl;
+ ip0->tos = mpls_exp_to_ip_dscp(vnet_buffer(b0)->mpls.exp);
+ ip1->tos = mpls_exp_to_ip_dscp(vnet_buffer(b1)->mpls.exp);
+
+ ip0->checksum = ip4_header_checksum(ip0);
+ ip1->checksum = ip4_header_checksum(ip1);
+ }
}
else if (payload_is_ip6)
{
ip6_header_t *ip0, *ip1;
- ip0 = vlib_buffer_get_current (b0);
- ip1 = vlib_buffer_get_current (b1);
+ ip0 = vlib_buffer_get_current(b0);
+ ip1 = vlib_buffer_get_current(b1);
/*
* IPv6 input checks on the exposed IP header
*/
- ip6_input_check_x2 (vm, error_node,
- b0, b1, ip0, ip1,
- &next0, &next1);
+ ip6_input_check_x2(vm, error_node,
+ b0, b1, ip0, ip1,
+ &next0, &next1);
+
+ if (FIB_MPLS_LSP_MODE_UNIFORM == mode)
+ {
+ /*
+ * Copy the TTL from the MPLS packet into the
+ * exposed IP
+ */
+ ip0->hop_limit = vnet_buffer(b0)->mpls.ttl;
+ ip1->hop_limit = vnet_buffer(b1)->mpls.ttl;
+
+ ip6_set_traffic_class_network_order(
+ ip0,
+ mpls_exp_to_ip_dscp(vnet_buffer(b0)->mpls.exp));
+ ip6_set_traffic_class_network_order(
+ ip1,
+ mpls_exp_to_ip_dscp(vnet_buffer(b1)->mpls.exp));
+ }
}
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
@@ -224,14 +289,14 @@ mpls_label_disposition_inline (vlib_main_t * vm,
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_disposition_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
+ vlib_add_trace(vm, node, b0, sizeof(*tr));
tr->mdd = mddi0;
}
if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_disposition_trace_t *tr =
- vlib_add_trace (vm, node, b1, sizeof (*tr));
+ vlib_add_trace(vm, node, b1, sizeof(*tr));
tr->mdd = mddi1;
}
@@ -254,7 +319,7 @@ mpls_label_disposition_inline (vlib_main_t * vm,
n_left_from -= 1;
n_left_to_next -= 1;
- b0 = vlib_get_buffer (vm, bi0);
+ b0 = vlib_get_buffer(vm, bi0);
/* dst lookup was done by ip4 lookup */
mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
@@ -265,24 +330,48 @@ mpls_label_disposition_inline (vlib_main_t * vm,
{
ip4_header_t *ip0;
- ip0 = vlib_buffer_get_current (b0);
+ ip0 = vlib_buffer_get_current(b0);
/*
* IPv4 input checks on the exposed IP header
* including checksum
*/
- ip4_input_check_x1 (vm, error_node, b0, ip0, &next0, 1);
+ ip4_input_check_x1(vm, error_node, b0, ip0, &next0, 1);
+
+ if (FIB_MPLS_LSP_MODE_UNIFORM == mode)
+ {
+ /*
+ * Copy the TTL from the MPLS packet into the
+ * exposed IP. recalc the chksum
+ */
+ ip0->ttl = vnet_buffer(b0)->mpls.ttl;
+ ip0->tos = mpls_exp_to_ip_dscp(vnet_buffer(b0)->mpls.exp);
+ ip0->checksum = ip4_header_checksum(ip0);
+ }
}
else if (payload_is_ip6)
{
ip6_header_t *ip0;
- ip0 = vlib_buffer_get_current (b0);
+ ip0 = vlib_buffer_get_current(b0);
/*
* IPv6 input checks on the exposed IP header
*/
- ip6_input_check_x1 (vm, error_node, b0, ip0, &next0);
+ ip6_input_check_x1(vm, error_node, b0, ip0, &next0);
+
+ if (FIB_MPLS_LSP_MODE_UNIFORM == mode)
+ {
+ /*
+ * Copy the TTL from the MPLS packet into the
+ * exposed IP
+ */
+ ip0->hop_limit = vnet_buffer(b0)->mpls.ttl;
+
+ ip6_set_traffic_class_network_order(
+ ip0,
+ mpls_exp_to_ip_dscp(vnet_buffer(b0)->mpls.exp));
+ }
}
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
@@ -291,14 +380,14 @@ mpls_label_disposition_inline (vlib_main_t * vm,
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_disposition_trace_t *tr =
- vlib_add_trace (vm, node, b0, sizeof (*tr));
+ vlib_add_trace(vm, node, b0, sizeof(*tr));
tr->mdd = mddi0;
}
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_put_next_frame(vm, node, next_index, n_left_to_next);
}
return from_frame->n_vectors;
}
@@ -306,57 +395,103 @@ mpls_label_disposition_inline (vlib_main_t * vm,
static u8 *
format_mpls_label_disposition_trace (u8 * s, va_list * args)
{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- CLIB_UNUSED (mpls_label_disposition_trace_t * t);
+ CLIB_UNUSED(vlib_main_t * vm) = va_arg(*args, vlib_main_t *);
+ CLIB_UNUSED(vlib_node_t * node) = va_arg(*args, vlib_node_t *);
+ CLIB_UNUSED(mpls_label_disposition_trace_t * t);
- t = va_arg (*args, mpls_label_disposition_trace_t *);
+ t = va_arg(*args, mpls_label_disposition_trace_t *);
s = format(s, "disp:%d", t->mdd);
return (s);
}
static uword
-ip4_mpls_label_disposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ip4_mpls_label_disposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_disposition_inline(vm, node, frame, 1, 0));
+ return (mpls_label_disposition_inline(vm, node, frame, 1, 0,
+ FIB_MPLS_LSP_MODE_PIPE));
}
-VLIB_REGISTER_NODE (ip4_mpls_label_disposition_node) = {
- .function = ip4_mpls_label_disposition,
- .name = "ip4-mpls-label-disposition",
- .vector_size = sizeof (u32),
+VLIB_REGISTER_NODE(ip4_mpls_label_disposition_pipe_node) = {
+ .function = ip4_mpls_label_disposition_pipe,
+ .name = "ip4-mpls-label-disposition-pipe",
+ .vector_size = sizeof(u32),
.format_trace = format_mpls_label_disposition_trace,
.sibling_of = "ip4-input",
.n_errors = IP4_N_ERROR,
.error_strings = ip4_error_strings,
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_disposition_node,
- ip4_mpls_label_disposition)
+VLIB_NODE_FUNCTION_MULTIARCH(ip4_mpls_label_disposition_pipe_node,
+ ip4_mpls_label_disposition_pipe)
static uword
-ip6_mpls_label_disposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ip6_mpls_label_disposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_disposition_inline(vm, node, frame, 0, 1));
+ return (mpls_label_disposition_inline(vm, node, frame, 0, 1,
+ FIB_MPLS_LSP_MODE_PIPE));
}
-VLIB_REGISTER_NODE (ip6_mpls_label_disposition_node) = {
- .function = ip6_mpls_label_disposition,
- .name = "ip6-mpls-label-disposition",
- .vector_size = sizeof (u32),
+VLIB_REGISTER_NODE(ip6_mpls_label_disposition_pipe_node) = {
+ .function = ip6_mpls_label_disposition_pipe,
+ .name = "ip6-mpls-label-disposition-pipe",
+ .vector_size = sizeof(u32),
.format_trace = format_mpls_label_disposition_trace,
.sibling_of = "ip6-input",
.n_errors = IP6_N_ERROR,
.error_strings = ip6_error_strings,
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_disposition_node,
- ip6_mpls_label_disposition)
+VLIB_NODE_FUNCTION_MULTIARCH(ip6_mpls_label_disposition_pipe_node,
+ ip6_mpls_label_disposition_pipe)
+
+static uword
+ip4_mpls_label_disposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_disposition_inline(vm, node, frame, 1, 0,
+ FIB_MPLS_LSP_MODE_UNIFORM));
+}
+
+VLIB_REGISTER_NODE(ip4_mpls_label_disposition_uniform_node) = {
+ .function = ip4_mpls_label_disposition_uniform,
+ .name = "ip4-mpls-label-disposition-uniform",
+ .vector_size = sizeof(u32),
+
+ .format_trace = format_mpls_label_disposition_trace,
+ .sibling_of = "ip4-input",
+ .n_errors = IP4_N_ERROR,
+ .error_strings = ip4_error_strings,
+};
+VLIB_NODE_FUNCTION_MULTIARCH(ip4_mpls_label_disposition_uniform_node,
+ ip4_mpls_label_disposition_uniform)
+
+static uword
+ip6_mpls_label_disposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_disposition_inline(vm, node, frame, 0, 1,
+ FIB_MPLS_LSP_MODE_UNIFORM));
+}
+
+VLIB_REGISTER_NODE(ip6_mpls_label_disposition_uniform_node) = {
+ .function = ip6_mpls_label_disposition_uniform,
+ .name = "ip6-mpls-label-disposition-uniform",
+ .vector_size = sizeof(u32),
+
+ .format_trace = format_mpls_label_disposition_trace,
+ .sibling_of = "ip6-input",
+ .n_errors = IP6_N_ERROR,
+ .error_strings = ip6_error_strings,
+};
+VLIB_NODE_FUNCTION_MULTIARCH(ip6_mpls_label_disposition_uniform_node,
+ ip6_mpls_label_disposition_uniform)
static void
mpls_disp_dpo_mem_show (void)
@@ -374,25 +509,44 @@ const static dpo_vft_t mdd_vft = {
.dv_mem_show = mpls_disp_dpo_mem_show,
};
-const static char* const mpls_label_disp_ip4_nodes[] =
+const static char* const mpls_label_disp_pipe_ip4_nodes[] =
+{
+ "ip4-mpls-label-disposition-pipe",
+ NULL,
+};
+const static char* const mpls_label_disp_pipe_ip6_nodes[] =
+{
+ "ip6-mpls-label-disposition-pipe",
+ NULL,
+};
+const static char* const * const mpls_label_disp_pipe_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_disp_pipe_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_disp_pipe_ip6_nodes,
+};
+
+const static char* const mpls_label_disp_uniform_ip4_nodes[] =
{
- "ip4-mpls-label-disposition",
+ "ip4-mpls-label-disposition-uniform",
NULL,
};
-const static char* const mpls_label_disp_ip6_nodes[] =
+const static char* const mpls_label_disp_uniform_ip6_nodes[] =
{
- "ip6-mpls-label-disposition",
+ "ip6-mpls-label-disposition-uniform",
NULL,
};
-const static char* const * const mpls_label_disp_nodes[DPO_PROTO_NUM] =
+const static char* const * const mpls_label_disp_uniform_nodes[DPO_PROTO_NUM] =
{
- [DPO_PROTO_IP4] = mpls_label_disp_ip4_nodes,
- [DPO_PROTO_IP6] = mpls_label_disp_ip6_nodes,
+ [DPO_PROTO_IP4] = mpls_label_disp_uniform_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_disp_uniform_ip6_nodes,
};
void
-mpls_disp_dpo_module_init (void)
+mpls_disp_dpo_module_init(void)
{
- dpo_register(DPO_MPLS_DISPOSITION, &mdd_vft, mpls_label_disp_nodes);
+ dpo_register(DPO_MPLS_DISPOSITION_PIPE, &mdd_vft,
+ mpls_label_disp_pipe_nodes);
+ dpo_register(DPO_MPLS_DISPOSITION_UNIFORM, &mdd_vft,
+ mpls_label_disp_uniform_nodes);
}
diff --git a/src/vnet/dpo/mpls_disposition.h b/src/vnet/dpo/mpls_disposition.h
index 9c0150830d2..9c3cc46ff30 100644
--- a/src/vnet/dpo/mpls_disposition.h
+++ b/src/vnet/dpo/mpls_disposition.h
@@ -45,6 +45,11 @@ typedef struct mpls_disp_dpo_t
* Number of locks/users of the label
*/
u16 mdd_locks;
+
+ /**
+ * LSP mode
+ */
+ fib_mpls_lsp_mode_t mdd_mode;
} mpls_disp_dpo_t;
/**
@@ -60,11 +65,15 @@ _Static_assert((sizeof(mpls_disp_dpo_t) <= CLIB_CACHE_LINE_BYTES),
*
* @param payload_proto The ptocool of the payload packets that will
* be imposed with this label header.
+ * @param rpf_id The RPF ID the packet will aquire - only for mcast
+ * @param mode The LSP mode; pipe or uniform
* @param dpo The parent of the created MPLS label object
*/
-extern index_t mpls_disp_dpo_create(dpo_proto_t payload_proto,
- fib_rpf_id_t rpf_id,
- const dpo_id_t *dpo);
+extern void mpls_disp_dpo_create(dpo_proto_t payload_proto,
+ fib_rpf_id_t rpf_id,
+ fib_mpls_lsp_mode_t mode,
+ const dpo_id_t *parent,
+ dpo_id_t *dpo);
extern u8* format_mpls_disp_dpo(u8 *s, va_list *args);
diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c
index fa5177ab9ea..954d637937b 100644
--- a/src/vnet/dpo/mpls_label_dpo.c
+++ b/src/vnet/dpo/mpls_label_dpo.c
@@ -23,6 +23,17 @@
*/
mpls_label_dpo_t *mpls_label_dpo_pool;
+/**
+ * Strings for the flags
+ */
+const char* mpls_label_dpo_attr_names[] = MPLS_LABEL_DPO_ATTR_NAMES;
+
+/**
+ * registered DPO types for each of the label sub-types. And there's a
+ * subtype for each of the flag combinations.
+ */
+static dpo_type_t mpls_label_dpo_types[1 << MPLS_LABEL_DPO_ATTR_MAX];
+
static mpls_label_dpo_t *
mpls_label_dpo_alloc (void)
{
@@ -42,70 +53,138 @@ mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
return (mld - mpls_label_dpo_pool);
}
-index_t
-mpls_label_dpo_create (mpls_label_t *label_stack,
+void
+mpls_label_dpo_create (fib_mpls_label_t *label_stack,
mpls_eos_bit_t eos,
- u8 ttl,
- u8 exp,
dpo_proto_t payload_proto,
- const dpo_id_t *dpo)
+ mpls_label_dpo_flags_t flags,
+ const dpo_id_t *parent,
+ dpo_id_t *dpo)
{
mpls_label_dpo_t *mld;
+ dpo_type_t dtype;
u32 ii;
+ if ((DPO_PROTO_IP4 != payload_proto) &&
+ (DPO_PROTO_IP6 != payload_proto))
+ {
+ /*
+ * remove unsupported configuration
+ */
+ flags &= ~MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR;
+ }
+
mld = mpls_label_dpo_alloc();
+ mld->mld_flags = flags;
+ dtype = mpls_label_dpo_types[flags];
if (MPLS_LABEL_DPO_MAX_N_LABELS < vec_len(label_stack))
{
clib_warning("Label stack size exceeded");
- dpo_stack(DPO_MPLS_LABEL,
+ dpo_stack(dtype,
mld->mld_payload_proto,
&mld->mld_dpo,
drop_dpo_get(DPO_PROTO_MPLS));
- return (mpls_label_dpo_get_index(mld));
}
+ else
+ {
+ mld->mld_n_labels = vec_len(label_stack);
+ mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
+ mld->mld_payload_proto = payload_proto;
+
+ /*
+ * construct label rewrite headers for each value passed.
+ * get the header in network byte order since we will paint it
+ * on a packet in the data-plane
+ */
+ for (ii = 0; ii < mld->mld_n_labels-1; ii++)
+ {
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_value);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_exp);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl,
+ MPLS_NON_EOS);
+ if (0 != label_stack[ii].fml_ttl)
+ {
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_ttl);
+ }
+ else
+ {
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl,
+ MPLS_LABEL_DEFAULT_TTL);
+ }
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ }
- mld->mld_n_labels = vec_len(label_stack);
- mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
- mld->mld_payload_proto = payload_proto;
+ /*
+ * the inner most label
+ */
+ ii = mld->mld_n_labels-1;
+
+ vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_value);
+ vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_exp);
+ vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
+ if (0 != label_stack[ii].fml_ttl)
+ {
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl,
+ label_stack[ii].fml_ttl);
+ }
+ else
+ {
+ vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl,
+ MPLS_LABEL_DEFAULT_TTL);
+ }
+ mld->mld_hdr[ii].label_exp_s_ttl =
+ clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
- /*
- * construct label rewrite headers for each value value passed.
- * get the header in network byte order since we will paint it
- * on a packet in the data-plane
- */
+ /*
+ * pipe/uniform mode is only supported for the bottom of stack label
+ */
+ if (FIB_MPLS_LSP_MODE_UNIFORM == label_stack[ii].fml_mode)
+ {
+ mld->mld_flags |= MPLS_LABEL_DPO_FLAG_UNIFORM_MODE;
+ }
+ else
+ {
+ mld->mld_flags &= ~MPLS_LABEL_DPO_FLAG_UNIFORM_MODE;
+ }
+ dtype = mpls_label_dpo_types[mld->mld_flags];
- for (ii = 0; ii < mld->mld_n_labels-1; ii++)
- {
- vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
- vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
- vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
- vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
- mld->mld_hdr[ii].label_exp_s_ttl =
- clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+ /*
+ * stack this label object on its parent.
+ */
+ dpo_stack(dtype,
+ mld->mld_payload_proto,
+ &mld->mld_dpo,
+ parent);
}
- /*
- * the inner most label
- */
- ii = mld->mld_n_labels-1;
+ dpo_set(dpo,
+ dtype,
+ mld->mld_payload_proto,
+ mpls_label_dpo_get_index(mld));
+}
- vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
- vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
- vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
- vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
- mld->mld_hdr[ii].label_exp_s_ttl =
- clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
+u8*
+format_mpls_label_dpo_flags (u8 *s, va_list *args)
+{
+ mpls_label_dpo_flags_t flags = va_arg (*args, int);
+ mpls_label_dpo_attr_t attr;
- /*
- * stack this label objct on its parent.
- */
- dpo_stack(DPO_MPLS_LABEL,
- mld->mld_payload_proto,
- &mld->mld_dpo,
- dpo);
+ FOR_EACH_MPLS_LABEL_DPO_ATTR(attr)
+ {
+ if ((1 << attr) & flags)
+ {
+ s = format(s, "%s,", mpls_label_dpo_attr_names[attr]);
+ }
+ }
- return (mpls_label_dpo_get_index(mld));
+ return (s);
}
u8*
@@ -117,17 +196,18 @@ format_mpls_label_dpo (u8 *s, va_list *args)
mpls_label_dpo_t *mld;
u32 ii;
- s = format(s, "mpls-label:[%d]:", index);
-
if (pool_is_free_index(mpls_label_dpo_pool, index))
{
/*
* the packet trace can be printed after the DPO has been deleted
*/
- return (s);
+ return (format(s, "mpls-label[???,%d]:", index));
}
mld = mpls_label_dpo_get(index);
+ s = format(s, "mpls-label[%U%d]:",
+ format_mpls_label_dpo_flags,
+ (int) mld->mld_flags, index);
for (ii = 0; ii < mld->mld_n_labels; ii++)
{
@@ -178,12 +258,21 @@ typedef struct mpls_label_imposition_trace_t_
* The MPLS header imposed
*/
mpls_unicast_header_t hdr;
+
+ /**
+ * TTL imposed - only valid for uniform LSPs
+ */
+ u8 ttl;
+
+ /**
+ * TTL imposed - only valid for uniform LSPs
+ */
+ u8 exp;
} mpls_label_imposition_trace_t;
always_inline mpls_unicast_header_t *
mpls_label_paint (vlib_buffer_t * b0,
- mpls_label_dpo_t *mld0,
- u8 ttl0)
+ mpls_label_dpo_t *mld0)
{
mpls_unicast_header_t *hdr0;
@@ -201,19 +290,74 @@ mpls_label_paint (vlib_buffer_t * b0,
clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
hdr0 = hdr0 + (mld0->mld_n_labels - 1);
}
+
+ return (hdr0);
+}
+
+/**
+ * Paint on an MPLS label and fixup the TTL
+ */
+always_inline mpls_unicast_header_t *
+mpls_label_paint_w_ttl (vlib_buffer_t * b0,
+ mpls_label_dpo_t *mld0,
+ u8 ttl0)
+{
+ mpls_unicast_header_t *hdr0;
+
+ hdr0 = mpls_label_paint(b0, mld0);
+
/* fixup the TTL for the inner most label */
((char*)hdr0)[3] = ttl0;
return (hdr0);
}
+/**
+ * Paint on an MPLS label and fixup the TTL and EXP bits.
+ */
+always_inline mpls_unicast_header_t *
+mpls_label_paint_w_ttl_exp (vlib_buffer_t * b0,
+ mpls_label_dpo_t *mld0,
+ u8 ttl0,
+ u8 exp0)
+{
+ mpls_unicast_header_t *hdr0;
+
+ hdr0 = mpls_label_paint_w_ttl(b0, mld0, ttl0);
+
+ /* fixup the EXP for the inner most label */
+ ((char*)hdr0)[2] |= (exp0 << 1);
+
+ return (hdr0);
+}
+
+/**
+ * Paint on an MPLS label and fixup the TTL and EXP bits
+ * When the EXP bits are *already* bit-shifted to the correct place
+ * in the 2nd byte (i.e. they were read from another label)
+ */
+always_inline mpls_unicast_header_t *
+mpls_label_paint_w_ttl_mpls_exp (vlib_buffer_t * b0,
+ mpls_label_dpo_t *mld0,
+ u8 ttl0,
+ u8 exp0)
+{
+ mpls_unicast_header_t *hdr0;
+
+ hdr0 = mpls_label_paint_w_ttl(b0, mld0, ttl0);
+
+ /* fixup the EXP for the inner most label */
+ ((char*)hdr0)[2] |= exp0;
+
+ return (hdr0);
+}
+
always_inline uword
mpls_label_imposition_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame,
- u8 payload_is_ip4,
- u8 payload_is_ip6,
- u8 payload_is_ethernet)
+ const dpo_proto_t dproto,
+ const mpls_label_dpo_flags_t flags)
{
u32 n_left_from, next_index, * from, * to_next;
@@ -235,7 +379,8 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mpls_label_dpo_t *mld0, *mld1, *mld2, *mld3;
vlib_buffer_t * b0, *b1, * b2, *b3;
u32 next0, next1, next2, next3;
- u8 ttl0, ttl1,ttl2, ttl3 ;
+ u8 ttl0, ttl1, ttl2, ttl3;
+ u8 exp0, exp1, exp2, exp3;
bi0 = to_next[0] = from[0];
bi1 = to_next[1] = from[1];
@@ -282,141 +427,247 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mld2 = mpls_label_dpo_get(mldi2);
mld3 = mpls_label_dpo_get(mldi3);
- if (payload_is_ip4)
+ if (DPO_PROTO_MPLS != dproto)
{
/*
- * decrement the TTL on ingress to the LSP
+ * These are the non-MPLS payload imposition cases
*/
- ip4_header_t * ip0 = vlib_buffer_get_current(b0);
- ip4_header_t * ip1 = vlib_buffer_get_current(b1);
- ip4_header_t * ip2 = vlib_buffer_get_current(b2);
- ip4_header_t * ip3 = vlib_buffer_get_current(b3);
- u32 checksum0;
- u32 checksum1;
- u32 checksum2;
- u32 checksum3;
-
- checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
- checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
- checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
- checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);
-
- checksum0 += checksum0 >= 0xffff;
- checksum1 += checksum1 >= 0xffff;
- checksum2 += checksum2 >= 0xffff;
- checksum3 += checksum3 >= 0xffff;
-
- ip0->checksum = checksum0;
- ip1->checksum = checksum1;
- ip2->checksum = checksum2;
- ip3->checksum = checksum3;
-
- ip0->ttl -= 1;
- ip1->ttl -= 1;
- ip2->ttl -= 1;
- ip3->ttl -= 1;
-
- ttl1 = ip1->ttl;
- ttl0 = ip0->ttl;
- ttl3 = ip3->ttl;
- ttl2 = ip2->ttl;
- }
- else if (payload_is_ip6)
- {
- /*
- * decrement the TTL on ingress to the LSP
- */
- ip6_header_t * ip0 = vlib_buffer_get_current(b0);
- ip6_header_t * ip1 = vlib_buffer_get_current(b1);
- ip6_header_t * ip2 = vlib_buffer_get_current(b2);
- ip6_header_t * ip3 = vlib_buffer_get_current(b3);
-
- ip0->hop_limit -= 1;
- ip1->hop_limit -= 1;
- ip2->hop_limit -= 1;
- ip3->hop_limit -= 1;
-
- ttl0 = ip0->hop_limit;
- ttl1 = ip1->hop_limit;
- ttl2 = ip2->hop_limit;
- ttl3 = ip3->hop_limit;
- }
- else if (payload_is_ethernet)
- {
+ if (DPO_PROTO_IP4 == dproto)
+ {
+ ip4_header_t * ip0 = vlib_buffer_get_current(b0);
+ ip4_header_t * ip1 = vlib_buffer_get_current(b1);
+ ip4_header_t * ip2 = vlib_buffer_get_current(b2);
+ ip4_header_t * ip3 = vlib_buffer_get_current(b3);
+
+ if (!(MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR & flags))
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ u32 checksum0;
+ u32 checksum1;
+ u32 checksum2;
+ u32 checksum3;
+
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
+ checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
+ checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);
+
+ checksum0 += checksum0 >= 0xffff;
+ checksum1 += checksum1 >= 0xffff;
+ checksum2 += checksum2 >= 0xffff;
+ checksum3 += checksum3 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ip1->checksum = checksum1;
+ ip2->checksum = checksum2;
+ ip3->checksum = checksum3;
+
+ ip0->ttl -= 1;
+ ip1->ttl -= 1;
+ ip2->ttl -= 1;
+ ip3->ttl -= 1;
+ }
+
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ ttl1 = ip1->ttl;
+ ttl0 = ip0->ttl;
+ ttl3 = ip3->ttl;
+ ttl2 = ip2->ttl;
+ /* by default copy the 3 most significant bits */
+ exp0 = ip_dscp_to_mpls_exp(ip0->tos);
+ exp1 = ip_dscp_to_mpls_exp(ip1->tos);
+ exp2 = ip_dscp_to_mpls_exp(ip2->tos);
+ exp3 = ip_dscp_to_mpls_exp(ip3->tos);
+ }
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip6_header_t * ip0 = vlib_buffer_get_current(b0);
+ ip6_header_t * ip1 = vlib_buffer_get_current(b1);
+ ip6_header_t * ip2 = vlib_buffer_get_current(b2);
+ ip6_header_t * ip3 = vlib_buffer_get_current(b3);
+
+ if (!(MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR & flags))
+ {
+ ip0->hop_limit -= 1;
+ ip1->hop_limit -= 1;
+ ip2->hop_limit -= 1;
+ ip3->hop_limit -= 1;
+ }
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ ttl0 = ip0->hop_limit;
+ ttl1 = ip1->hop_limit;
+ ttl2 = ip2->hop_limit;
+ ttl3 = ip3->hop_limit;
+ /* by default copy the 3 most significant bits */
+ exp0 = ip_dscp_to_mpls_exp(
+ ip6_traffic_class_network_order(ip0));
+ exp1 = ip_dscp_to_mpls_exp(
+ ip6_traffic_class_network_order(ip1));
+ exp2 = ip_dscp_to_mpls_exp(
+ ip6_traffic_class_network_order(ip2));
+ exp3 = ip_dscp_to_mpls_exp(
+ ip6_traffic_class_network_order(ip3));
+ }
+ }
+ else
+ {
+ /*
+ * nothing to change in the ethernet header
+ */
+ ttl0 = ttl1 = ttl2 = ttl3 = MPLS_LABEL_DEFAULT_TTL;
+ exp0 = exp1 = exp2 = exp3 = MPLS_LABEL_DEFAULT_EXP;
+ }
/*
- * nothing to chang ein the ethernet header
+ * These are the non-MPLS payload imposition cases.
+ * Based on the LSP mode either, for uniform, copy down the TTL
+ * and EXP from the payload or, for pipe mode, slap on the value
+ * requested from config
*/
- ttl0 = ttl1 = ttl2 = ttl3 = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr0 = mpls_label_paint_w_ttl_exp(b0, mld0, ttl0, exp0);
+ hdr1 = mpls_label_paint_w_ttl_exp(b1, mld1, ttl1, exp1);
+ hdr2 = mpls_label_paint_w_ttl_exp(b2, mld2, ttl2, exp2);
+ hdr3 = mpls_label_paint_w_ttl_exp(b3, mld3, ttl3, exp3);
+ }
+ else
+ {
+ hdr0 = mpls_label_paint(b0, mld0);
+ hdr1 = mpls_label_paint(b1, mld1);
+ hdr2 = mpls_label_paint(b2, mld2);
+ hdr3 = mpls_label_paint(b3, mld3);
+ }
}
else
{
/*
* else, the packet to be encapped is an MPLS packet
+ * there are two cases to consider:
+ * 1 - this is an MPLS label swap at an LSP midpoint.
+ * recognisable because mpls.first = 1. In this case the
+ * TTL must be set to the current value -1.
+ * 2 - The MPLS packet is recursing (or being injected into)
+ * this LSP, in which case the pipe/uniform rules apply
+ *
*/
if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
{
/*
- * The first label to be imposed on the packet. this is a label swap.
- * in which case we stashed the TTL and EXP bits in the
- * packet in the lookup node
+ * The first label to be imposed on the packet. this is a
+ * label swap, in which case we stashed the TTL and EXP bits
+ * in the packet in the lookup node
*/
ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
+ exp0 = vnet_buffer(b0)->mpls.exp;
+ hdr0 = mpls_label_paint_w_ttl_exp(b0, mld0, ttl0, exp0);
}
else
{
/*
- * not the first label. implying we are recusring down a chain of
- * output labels.
- * Each layer is considered a new LSP - hence the TTL is reset.
+ * not the first label. implying we are recursing down a
+ * chain of output labels. Each layer is considered a new
+ * LSP - hence the TTL/EXP are pipe/uniform handled
*/
- ttl0 = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr0 = vlib_buffer_get_current(b0);
+ ttl0 = ((u8*)hdr0)[3];
+ exp0 = ((u8*)hdr0)[2] & 0xe;
+ hdr0 = mpls_label_paint_w_ttl_mpls_exp(b0, mld0, ttl0, exp0);
+ }
+ else
+ {
+ hdr0 = mpls_label_paint(b0, mld0);
+ }
}
if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
{
- ASSERT(1 != vnet_buffer (b1)->mpls.ttl);
+ ASSERT(0 != vnet_buffer (b1)->mpls.ttl);
+
ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
+ exp1 = vnet_buffer(b1)->mpls.exp;
+ hdr1 = mpls_label_paint_w_ttl_exp(b1, mld1, ttl1, exp1);
}
else
{
- ttl1 = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr1 = vlib_buffer_get_current(b1);
+ ttl1 = ((u8*)hdr1)[3];
+ exp1 = ((u8*)hdr1)[2] & 0xe;
+ hdr1 = mpls_label_paint_w_ttl_mpls_exp(b1, mld1, ttl1, exp1);
+ }
+ else
+ {
+ hdr1 = mpls_label_paint(b1, mld1);
+ }
}
if (PREDICT_TRUE(vnet_buffer(b2)->mpls.first))
{
- ASSERT(1 != vnet_buffer (b2)->mpls.ttl);
+ ASSERT(0 != vnet_buffer (b2)->mpls.ttl);
ttl2 = vnet_buffer(b2)->mpls.ttl - 1;
+ exp2 = vnet_buffer(b2)->mpls.exp;
+ hdr2 = mpls_label_paint_w_ttl_exp(b2, mld2, ttl2, exp2);
}
else
{
- ttl2 = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr2 = vlib_buffer_get_current(b2);
+ ttl2 = ((u8*)hdr2)[3];
+ exp2 = ((u8*)hdr2)[2] & 0xe;
+ hdr2 = mpls_label_paint_w_ttl_mpls_exp(b2, mld2, ttl2, exp2);
+ }
+ else
+ {
+ hdr2 = mpls_label_paint(b2, mld2);
+ }
}
if (PREDICT_TRUE(vnet_buffer(b3)->mpls.first))
{
- ASSERT(1 != vnet_buffer (b3)->mpls.ttl);
+ ASSERT(0 != vnet_buffer (b3)->mpls.ttl);
+
ttl3 = vnet_buffer(b3)->mpls.ttl - 1;
+ exp3 = vnet_buffer(b3)->mpls.exp;
+ hdr3 = mpls_label_paint_w_ttl_exp(b3, mld3, ttl3, exp3);
}
else
{
- ttl3 = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr3 = vlib_buffer_get_current(b3);
+ ttl3 = ((u8*)hdr3)[3];
+ exp3 = ((u8*)hdr3)[2] & 0xe;
+ hdr3 = mpls_label_paint_w_ttl_mpls_exp(b3, mld3, ttl3, exp3);
+ }
+ else
+ {
+ hdr3 = mpls_label_paint(b3, mld3);
+ }
}
- }
- vnet_buffer(b0)->mpls.first = 0;
- vnet_buffer(b1)->mpls.first = 0;
- vnet_buffer(b2)->mpls.first = 0;
- vnet_buffer(b3)->mpls.first = 0;
- /* Paint the MPLS header */
- hdr0 = mpls_label_paint(b0, mld0, ttl0);
- hdr1 = mpls_label_paint(b1, mld1, ttl1);
- hdr2 = mpls_label_paint(b2, mld2, ttl2);
- hdr3 = mpls_label_paint(b3, mld3, ttl3);
+ vnet_buffer(b0)->mpls.first = 0;
+ vnet_buffer(b1)->mpls.first = 0;
+ vnet_buffer(b2)->mpls.first = 0;
+ vnet_buffer(b3)->mpls.first = 0;
+ }
next0 = mld0->mld_dpo.dpoi_next_node;
next1 = mld1->mld_dpo.dpoi_next_node;
next2 = mld2->mld_dpo.dpoi_next_node;
next3 = mld3->mld_dpo.dpoi_next_node;
+
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
vnet_buffer(b2)->ip.adj_index[VLIB_TX] = mld2->mld_dpo.dpoi_index;
@@ -427,24 +678,60 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mpls_label_imposition_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->hdr = *hdr0;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ tr->ttl = ttl0;
+ tr->exp = exp0;
+ }
+ else
+ {
+ tr->ttl = tr->exp = 0;
+ }
}
if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_imposition_trace_t *tr =
vlib_add_trace (vm, node, b1, sizeof (*tr));
tr->hdr = *hdr1;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ tr->ttl = ttl1;
+ tr->exp = exp1;
+ }
+ else
+ {
+ tr->ttl = tr->exp = 0;
+ }
}
if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_imposition_trace_t *tr =
vlib_add_trace (vm, node, b2, sizeof (*tr));
tr->hdr = *hdr2;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ tr->ttl = ttl2;
+ tr->exp = exp2;
+ }
+ else
+ {
+ tr->ttl = tr->exp = 0;
+ }
}
if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_label_imposition_trace_t *tr =
vlib_add_trace (vm, node, b3, sizeof (*tr));
tr->hdr = *hdr3;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ tr->ttl = ttl3;
+ tr->exp = exp3;
+ }
+ else
+ {
+ tr->ttl = tr->exp = 0;
+ }
}
vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next,
@@ -459,8 +746,8 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mpls_label_dpo_t *mld0;
vlib_buffer_t * b0;
u32 bi0, mldi0;
+ u8 ttl0, exp0;
u32 next0;
- u8 ttl;
bi0 = from[0];
to_next[0] = bi0;
@@ -475,67 +762,99 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
mld0 = mpls_label_dpo_get(mldi0);
- if (payload_is_ip4)
+ if (DPO_PROTO_MPLS != dproto)
{
- /*
- * decrement the TTL on ingress to the LSP
- */
- ip4_header_t * ip0 = vlib_buffer_get_current(b0);
- u32 checksum0;
-
- checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
- checksum0 += checksum0 >= 0xffff;
+ if (DPO_PROTO_IP4 == dproto)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip4_header_t * ip0 = vlib_buffer_get_current(b0);
+ if (!(MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR & flags))
+ {
+ u32 checksum0;
+
+ checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ checksum0 += checksum0 >= 0xffff;
+
+ ip0->checksum = checksum0;
+ ip0->ttl -= 1;
+ }
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ ttl0 = ip0->ttl;
+ exp0 = ip_dscp_to_mpls_exp(ip0->tos);
+ }
+ }
+ else if (DPO_PROTO_IP6 == dproto)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ ip6_header_t * ip0 = vlib_buffer_get_current(b0);
+
+ if (!(MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR & flags))
+ {
+ ip0->hop_limit -= 1;
+ }
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ ttl0 = ip0->hop_limit;
+ exp0 = ip_dscp_to_mpls_exp(
+ ip6_traffic_class_network_order(ip0));
+ }
+ }
+ else
+ {
+ /*
+ * nothing to change in the ethernet header
+ */
+ ttl0 = MPLS_LABEL_DEFAULT_TTL;
+ exp0 = MPLS_LABEL_DEFAULT_EXP;
+ }
- ip0->checksum = checksum0;
- ip0->ttl -= 1;
- ttl = ip0->ttl;
- }
- else if (payload_is_ip6)
- {
/*
- * decrement the TTL on ingress to the LSP
+ * These are the non-MPLS payload imposition cases.
+ * Based on the LSP mode either, for uniform, copy down the TTL
+ * from the payload or, for pipe mode, slap on the value
+ * requested from config
*/
- ip6_header_t * ip0 = vlib_buffer_get_current(b0);
-
- ip0->hop_limit -= 1;
- ttl = ip0->hop_limit;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr0 = mpls_label_paint_w_ttl_exp(b0, mld0, ttl0, exp0);
+ }
+ else
+ {
+ hdr0 = mpls_label_paint(b0, mld0);
+ }
}
else
{
- /*
- * else, the packet to be encapped is an MPLS packet
- */
- if (vnet_buffer(b0)->mpls.first)
+ if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
{
- /*
- * The first label to be imposed on the packet. this is a label swap.
- * in which case we stashed the TTL and EXP bits in the
- * packet in the lookup node
- */
ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
- ttl = vnet_buffer(b0)->mpls.ttl - 1;
+ ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
+ exp0 = vnet_buffer(b0)->mpls.exp;
+ hdr0 = mpls_label_paint_w_ttl_exp(b0, mld0, ttl0, exp0);
}
else
{
- /*
- * not the first label. implying we are recusring down a chain of
- * output labels.
- * Each layer is considered a new LSP - hence the TTL is reset.
- */
- ttl = 255;
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ hdr0 = vlib_buffer_get_current(b0);
+ ttl0 = ((u8*)hdr0)[3];
+ exp0 = ((u8*)hdr0)[2] & 0xe;
+ hdr0 = mpls_label_paint_w_ttl_mpls_exp(b0, mld0, ttl0, exp0);
+ }
+ else
+ {
+ hdr0 = mpls_label_paint(b0, mld0);
+ }
}
- }
- vnet_buffer(b0)->mpls.first = 0;
-
- /* Paint the MPLS header */
- vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
- hdr0 = vlib_buffer_get_current(b0);
- clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
- /* fixup the TTL for the inner most label */
- hdr0 = hdr0 + (mld0->mld_n_labels - 1);
- ((char*)hdr0)[3] = ttl;
+ vnet_buffer(b0)->mpls.first = 0;
+ }
next0 = mld0->mld_dpo.dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
@@ -545,7 +864,16 @@ mpls_label_imposition_inline (vlib_main_t * vm,
mpls_label_imposition_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->hdr = *hdr0;
- }
+ if (flags & MPLS_LABEL_DPO_FLAG_UNIFORM_MODE)
+ {
+ tr->ttl = ttl0;
+ tr->exp = exp0;
+ }
+ else
+ {
+ tr->ttl = tr->exp = 0;
+ }
+ }
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
@@ -575,16 +903,18 @@ format_mpls_label_imposition_trace (u8 * s, va_list * args)
}
static uword
-mpls_label_imposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+mpls_mpls_label_imposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0));
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_MPLS,
+ MPLS_LABEL_DPO_FLAG_NONE));
}
-VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
- .function = mpls_label_imposition,
- .name = "mpls-label-imposition",
+VLIB_REGISTER_NODE (mpls_mpls_label_imposition_pipe_node) = {
+ .function = mpls_mpls_label_imposition_pipe,
+ .name = "mpls-label-imposition-pipe",
.vector_size = sizeof (u32),
.format_trace = format_mpls_label_imposition_trace,
@@ -593,20 +923,22 @@ VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
[0] = "mpls-drop",
}
};
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
- mpls_label_imposition)
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_mpls_label_imposition_pipe_node,
+ mpls_mpls_label_imposition_pipe)
static uword
-ip4_mpls_label_imposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ip4_mpls_label_imposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0));
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP4,
+ MPLS_LABEL_DPO_FLAG_NONE));
}
-VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
- .function = ip4_mpls_label_imposition,
- .name = "ip4-mpls-label-imposition",
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_pipe_node) = {
+ .function = ip4_mpls_label_imposition_pipe,
+ .name = "ip4-mpls-label-imposition-pipe",
.vector_size = sizeof (u32),
.format_trace = format_mpls_label_imposition_trace,
@@ -615,20 +947,22 @@ VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
[0] = "ip4-drop",
}
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
- ip4_mpls_label_imposition)
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_pipe_node,
+ ip4_mpls_label_imposition_pipe)
static uword
-ip6_mpls_label_imposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ip6_mpls_label_imposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0));
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP6,
+ MPLS_LABEL_DPO_FLAG_NONE));
}
-VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
- .function = ip6_mpls_label_imposition,
- .name = "ip6-mpls-label-imposition",
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_pipe_node) = {
+ .function = ip6_mpls_label_imposition_pipe,
+ .name = "ip6-mpls-label-imposition-pipe",
.vector_size = sizeof (u32),
.format_trace = format_mpls_label_imposition_trace,
@@ -637,20 +971,22 @@ VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
[0] = "ip6-drop",
}
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
- ip6_mpls_label_imposition)
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_pipe_node,
+ ip6_mpls_label_imposition_pipe)
static uword
-ethernet_mpls_label_imposition (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ethernet_mpls_label_imposition_pipe (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1));
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_ETHERNET,
+ MPLS_LABEL_DPO_FLAG_NONE));
}
-VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_node) = {
- .function = ethernet_mpls_label_imposition,
- .name = "ethernet-mpls-label-imposition",
+VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_pipe_node) = {
+ .function = ethernet_mpls_label_imposition_pipe,
+ .name = "ethernet-mpls-label-imposition-pipe",
.vector_size = sizeof (u32),
.format_trace = format_mpls_label_imposition_trace,
@@ -659,8 +995,205 @@ VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_node) = {
[0] = "error-drop",
}
};
-VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node,
- ethernet_mpls_label_imposition)
+
+VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_pipe_node,
+ ethernet_mpls_label_imposition_pipe)
+
+static uword
+mpls_mpls_label_imposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_MPLS,
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE));
+}
+
+VLIB_REGISTER_NODE (mpls_mpls_label_imposition_uniform_node) = {
+ .function = mpls_mpls_label_imposition_uniform,
+ .name = "mpls-label-imposition-uniform",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "mpls-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (mpls_mpls_label_imposition_uniform_node,
+ mpls_mpls_label_imposition_uniform)
+
+static uword
+ip4_mpls_label_imposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP4,
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_uniform_node) = {
+ .function = ip4_mpls_label_imposition_uniform,
+ .name = "ip4-mpls-label-imposition-uniform",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_uniform_node,
+ ip4_mpls_label_imposition_uniform)
+
+static uword
+ip6_mpls_label_imposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP6,
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_uniform_node) = {
+ .function = ip6_mpls_label_imposition_uniform,
+ .name = "ip6-mpls-label-imposition-uniform",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_uniform_node,
+ ip6_mpls_label_imposition_uniform)
+
+static uword
+ethernet_mpls_label_imposition_uniform (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_ETHERNET,
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE));
+}
+
+VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_uniform_node) = {
+ .function = ethernet_mpls_label_imposition_uniform,
+ .name = "ethernet-mpls-label-imposition-uniform",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ }
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_uniform_node,
+ ethernet_mpls_label_imposition_uniform)
+
+static uword
+ip4_mpls_label_imposition_pipe_no_ip_ttl_decr (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP4,
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_pipe_no_ip_ttl_decr_node) = {
+ .function = ip4_mpls_label_imposition_pipe_no_ip_ttl_decr,
+ .name = "ip4-mpls-label-imposition-pipe-no-ip-ttl-decr",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_pipe_no_ip_ttl_decr_node,
+ ip4_mpls_label_imposition_pipe_no_ip_ttl_decr)
+
+static uword
+ip6_mpls_label_imposition_pipe_no_ip_ttl_decr (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP6,
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_pipe_no_ip_ttl_decr_node) = {
+ .function = ip6_mpls_label_imposition_pipe_no_ip_ttl_decr,
+ .name = "ip6-mpls-label-imposition-pipe-no-ip-ttl-decr",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_pipe_no_ip_ttl_decr_node,
+ ip6_mpls_label_imposition_pipe_no_ip_ttl_decr)
+
+static uword
+ip4_mpls_label_imposition_uniform_no_ip_ttl_decr (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP4,
+ (MPLS_LABEL_DPO_FLAG_UNIFORM_MODE |
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR)));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_imposition_uniform_no_ip_ttl_decr_node) = {
+ .function = ip4_mpls_label_imposition_uniform_no_ip_ttl_decr,
+ .name = "ip4-mpls-label-imposition-uniform-no-ip-ttl-decr",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_uniform_no_ip_ttl_decr_node,
+ ip4_mpls_label_imposition_uniform_no_ip_ttl_decr)
+
+static uword
+ip6_mpls_label_imposition_uniform_no_ip_ttl_decr (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_imposition_inline(vm, node, frame,
+ DPO_PROTO_IP6,
+ (MPLS_LABEL_DPO_FLAG_UNIFORM_MODE |
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR)));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_imposition_uniform_no_ip_ttl_decr_node) = {
+ .function = ip6_mpls_label_imposition_uniform_no_ip_ttl_decr,
+ .name = "ip6-mpls-label-imposition-uniform-no-ip-ttl-decr",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_imposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_uniform_no_ip_ttl_decr_node,
+ ip6_mpls_label_imposition_uniform_no_ip_ttl_decr)
+
static void
mpls_label_dpo_mem_show (void)
@@ -678,38 +1211,118 @@ const static dpo_vft_t mld_vft = {
.dv_mem_show = mpls_label_dpo_mem_show,
};
-const static char* const mpls_label_imp_ip4_nodes[] =
+const static char* const mpls_label_imp_pipe_ip4_nodes[] =
{
- "ip4-mpls-label-imposition",
+ "ip4-mpls-label-imposition-pipe",
NULL,
};
-const static char* const mpls_label_imp_ip6_nodes[] =
+const static char* const mpls_label_imp_pipe_ip6_nodes[] =
{
- "ip6-mpls-label-imposition",
+ "ip6-mpls-label-imposition-pipe",
NULL,
};
-const static char* const mpls_label_imp_mpls_nodes[] =
+const static char* const mpls_label_imp_pipe_mpls_nodes[] =
{
- "mpls-label-imposition",
+ "mpls-label-imposition-pipe",
NULL,
};
-const static char* const mpls_label_imp_ethernet_nodes[] =
+const static char* const mpls_label_imp_pipe_ethernet_nodes[] =
{
- "ethernet-mpls-label-imposition",
+ "ethernet-mpls-label-imposition-pipe",
NULL,
};
-const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
+const static char* const * const mpls_label_imp_pipe_nodes[DPO_PROTO_NUM] =
{
- [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
- [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
- [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
- [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes,
+ [DPO_PROTO_IP4] = mpls_label_imp_pipe_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_imp_pipe_ip6_nodes,
+ [DPO_PROTO_MPLS] = mpls_label_imp_pipe_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = mpls_label_imp_pipe_ethernet_nodes,
};
+const static char* const mpls_label_imp_uniform_ip4_nodes[] =
+{
+ "ip4-mpls-label-imposition-uniform",
+ NULL,
+};
+const static char* const mpls_label_imp_uniform_ip6_nodes[] =
+{
+ "ip6-mpls-label-imposition-uniform",
+ NULL,
+};
+const static char* const mpls_label_imp_uniform_mpls_nodes[] =
+{
+ "mpls-label-imposition-uniform",
+ NULL,
+};
+const static char* const mpls_label_imp_uniform_ethernet_nodes[] =
+{
+ "ethernet-mpls-label-imposition-uniform",
+ NULL,
+};
+
+const static char* const * const mpls_label_imp_uniform_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_imp_uniform_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_imp_uniform_ip6_nodes,
+ [DPO_PROTO_MPLS] = mpls_label_imp_uniform_mpls_nodes,
+ [DPO_PROTO_ETHERNET] = mpls_label_imp_uniform_ethernet_nodes,
+};
+
+const static char* const mpls_label_imp_pipe_no_ip_tll_decr_ip4_nodes[] =
+{
+ "ip4-mpls-label-imposition-pipe-no-ip-ttl-decr",
+ NULL,
+};
+const static char* const mpls_label_imp_pipe_no_ip_tll_decr_ip6_nodes[] =
+{
+ "ip6-mpls-label-imposition-pipe-no-ip-ttl-decr",
+ NULL,
+};
+
+const static char* const * const mpls_label_imp_pipe_no_ip_tll_decr_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_imp_pipe_no_ip_tll_decr_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_imp_pipe_no_ip_tll_decr_ip6_nodes,
+};
+
+const static char* const mpls_label_imp_uniform_no_ip_tll_decr_ip4_nodes[] =
+{
+ "ip4-mpls-label-imposition-uniform-no-ip-ttl-decr",
+ NULL,
+};
+const static char* const mpls_label_imp_uniform_no_ip_tll_decr_ip6_nodes[] =
+{
+ "ip6-mpls-label-imposition-uniform-no-ip-ttl-decr",
+ NULL,
+};
+
+const static char* const * const mpls_label_imp_uniform_no_ip_tll_decr_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_imp_uniform_no_ip_tll_decr_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_imp_uniform_no_ip_tll_decr_ip6_nodes,
+};
void
mpls_label_dpo_module_init (void)
{
- dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
+ mpls_label_dpo_types[MPLS_LABEL_DPO_FLAG_NONE] =
+ dpo_register_new_type(&mld_vft,
+ mpls_label_imp_pipe_nodes);
+ mpls_label_dpo_types[MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR] =
+ dpo_register_new_type(&mld_vft,
+ mpls_label_imp_pipe_no_ip_tll_decr_nodes);
+ mpls_label_dpo_types[MPLS_LABEL_DPO_FLAG_UNIFORM_MODE] =
+ dpo_register_new_type(&mld_vft,
+ mpls_label_imp_uniform_nodes);
+ mpls_label_dpo_types[MPLS_LABEL_DPO_FLAG_UNIFORM_MODE |
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR] =
+ dpo_register_new_type(&mld_vft,
+ mpls_label_imp_uniform_no_ip_tll_decr_nodes);
+}
+
+dpo_type_t
+mpls_label_dpo_get_type (mpls_label_dpo_flags_t flags)
+{
+ return (mpls_label_dpo_types[flags]);
}
diff --git a/src/vnet/dpo/mpls_label_dpo.h b/src/vnet/dpo/mpls_label_dpo.h
index 8494d26b495..98c88f7d812 100644
--- a/src/vnet/dpo/mpls_label_dpo.h
+++ b/src/vnet/dpo/mpls_label_dpo.h
@@ -20,11 +20,47 @@
#include <vnet/mpls/packet.h>
#include <vnet/dpo/dpo.h>
+/**
+ * Flags present on an MPLS label sourced path-extension
+ */
+typedef enum mpls_label_dpo_attr_t_
+{
+ /**
+ * Do not decrement the TTL of IP packet during imposition
+ */
+ MPLS_LABEL_DPO_ATTR_NO_IP_TTL_DECR,
+ MPLS_LABEL_DPO_ATTR_UNIFORM_MODE,
+} mpls_label_dpo_attr_t;
+
+#define MPLS_LABEL_DPO_ATTR_MAX (MPLS_LABEL_DPO_ATTR_UNIFORM_MODE+1)
+
+typedef enum mpls_label_dpo_flags_t_
+{
+ MPLS_LABEL_DPO_FLAG_NONE = 0,
+ MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR = (1 << MPLS_LABEL_DPO_ATTR_NO_IP_TTL_DECR),
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE = (1 << MPLS_LABEL_DPO_ATTR_UNIFORM_MODE),
+} __attribute__ ((packed)) mpls_label_dpo_flags_t;
+
+#define MPLS_LABEL_DPO_ATTR_NAMES { \
+ [MPLS_LABEL_DPO_ATTR_NO_IP_TTL_DECR] = "no-ip-tll-decr", \
+ [MPLS_LABEL_DPO_ATTR_UNIFORM_MODE] = "uniform-mode", \
+}
+
+#define FOR_EACH_MPLS_LABEL_DPO_ATTR(_item) \
+ for (_item = MPLS_LABEL_DPO_ATTR_NO_IP_TTL_DECR; \
+ _item <= MPLS_LABEL_DPO_ATTR_UNIFORM_MODE; \
+ _item++)
+
+/**
+ * Format the flags variable
+ */
+extern u8* format_mpls_label_dpo_flags(u8 *s, va_list *args);
/**
* Maximum number of labels in one DPO
*/
#define MPLS_LABEL_DPO_MAX_N_LABELS 12
+
/**
* A representation of an MPLS label for imposition in the data-path
*/
@@ -47,9 +83,14 @@ typedef struct mpls_label_dpo_t
dpo_proto_t mld_payload_proto;
/**
+ * Flags
+ */
+ mpls_label_dpo_flags_t mld_flags;
+
+ /**
* Size of the label stack
*/
- u16 mld_n_labels;
+ u8 mld_n_labels;
/**
* Cached amount of header bytes to paint
@@ -75,18 +116,17 @@ STATIC_ASSERT((sizeof(mpls_label_dpo_t) <= CLIB_CACHE_LINE_BYTES),
*
* @param label_stack The stack if labels to impose, outer most label first
* @param eos The inner most label's EOS bit
- * @param ttl The inner most label's TTL bit
- * @param exp The inner most label's EXP bit
* @param payload_proto The protocol of the payload packets that will
* be imposed with this label header.
- * @param dpo The parent of the created MPLS label object
+ * @param parent The parent of the created MPLS label object
+ * @param dpo The MPLS label DPO created
*/
-extern index_t mpls_label_dpo_create(mpls_label_t *label_stack,
- mpls_eos_bit_t eos,
- u8 ttl,
- u8 exp,
- dpo_proto_t payload_proto,
- const dpo_id_t *dpo);
+extern void mpls_label_dpo_create(fib_mpls_label_t *label_stack,
+ mpls_eos_bit_t eos,
+ dpo_proto_t payload_proto,
+ mpls_label_dpo_flags_t flags,
+ const dpo_id_t *parent,
+ dpo_id_t *dpo);
extern u8* format_mpls_label_dpo(u8 *s, va_list *args);
@@ -104,4 +144,9 @@ mpls_label_dpo_get (index_t index)
extern void mpls_label_dpo_module_init(void);
+/*
+ * test function to get the registered DPO type for the flags
+ */
+extern dpo_type_t mpls_label_dpo_get_type(mpls_label_dpo_flags_t flags);
+
#endif
diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h
index bbe4eaac74b..d10ba00913b 100644
--- a/src/vnet/fib/fib_api.h
+++ b/src/vnet/fib/fib_api.h
@@ -53,7 +53,7 @@ add_del_route_t_handler (u8 is_multipath,
u16 next_hop_weight,
u16 next_hop_preference,
mpls_label_t next_hop_via_label,
- mpls_label_t * next_hop_out_label_stack);
+ fib_mpls_label_t * next_hop_out_label_stack);
void
copy_fib_next_hop (fib_route_path_encode_t * api_rpath,
diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c
index 6dc0c73a305..ec8c7393030 100644
--- a/src/vnet/fib/fib_entry_src.c
+++ b/src/vnet/fib/fib_entry_src.c
@@ -212,6 +212,7 @@ static int
fib_entry_src_valid_out_label (mpls_label_t label)
{
return ((MPLS_LABEL_IS_REAL(label) ||
+ MPLS_LABEL_POP == label ||
MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL == label ||
MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL == label ||
MPLS_IETF_IMPLICIT_NULL_LABEL == label));
@@ -330,6 +331,7 @@ fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
&nh->path_dpo);
fib_path_stack_mpls_disp(path_index,
fib_prefix_get_payload_proto(&ctx->fib_entry->fe_prefix),
+ FIB_MPLS_LSP_MODE_PIPE,
&nh->path_dpo);
break;
@@ -391,7 +393,7 @@ fib_entry_src_collect_forwarding (fib_node_index_t pl_index,
switch (path_ext->fpe_type)
{
case FIB_PATH_EXT_MPLS:
- if (fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0]))
+ if (fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0].fml_value))
{
/*
* found a matching extension. stack it to obtain the forwarding
diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index b4f9971f52e..a8593905438 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -1349,7 +1349,8 @@ fib_path_create (fib_node_index_t pl_index,
{
path->fp_type = FIB_PATH_TYPE_DEAG;
path->deag.fp_tbl_id = rpath->frp_fib_index;
- }
+ path->deag.fp_rpf_id = ~0;
+ }
}
else
{
@@ -2238,6 +2239,7 @@ fib_path_contribute_urpf (fib_node_index_t path_index,
void
fib_path_stack_mpls_disp (fib_node_index_t path_index,
dpo_proto_t payload_proto,
+ fib_mpls_lsp_mode_t mode,
dpo_id_t *dpo)
{
fib_path_t *path;
@@ -2253,10 +2255,8 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
dpo_id_t tmp = DPO_INVALID;
dpo_copy(&tmp, dpo);
- dpo_set(dpo,
- DPO_MPLS_DISPOSITION,
- payload_proto,
- mpls_disp_dpo_create(payload_proto, ~0, &tmp));
+
+ mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
dpo_reset(&tmp);
break;
}
@@ -2265,12 +2265,10 @@ fib_path_stack_mpls_disp (fib_node_index_t path_index,
dpo_id_t tmp = DPO_INVALID;
dpo_copy(&tmp, dpo);
- dpo_set(dpo,
- DPO_MPLS_DISPOSITION,
- payload_proto,
- mpls_disp_dpo_create(payload_proto,
- path->deag.fp_rpf_id,
- &tmp));
+
+ mpls_disp_dpo_create(payload_proto,
+ path->deag.fp_rpf_id,
+ mode, &tmp, dpo);
dpo_reset(&tmp);
break;
}
diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h
index 70b2f503aa8..28ec10a98d5 100644
--- a/src/vnet/fib/fib_path.h
+++ b/src/vnet/fib/fib_path.h
@@ -159,6 +159,7 @@ extern load_balance_path_t * fib_path_append_nh_for_multipath_hash(
load_balance_path_t *hash_key);
extern void fib_path_stack_mpls_disp(fib_node_index_t path_index,
dpo_proto_t payload_proto,
+ fib_mpls_lsp_mode_t mode,
dpo_id_t *dpo);
extern void fib_path_contribute_forwarding(fib_node_index_t path_index,
fib_forward_chain_type_t type,
diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c
index a285ba07f7c..6b5b841c2ac 100644
--- a/src/vnet/fib/fib_path_ext.c
+++ b/src/vnet/fib/fib_path_ext.c
@@ -25,6 +25,7 @@
#include <vnet/fib/fib_internal.h>
const char *fib_path_ext_adj_flags_names[] = FIB_PATH_EXT_ADJ_ATTR_NAMES;
+const char *fib_path_ext_mpls_flags_names[] = FIB_PATH_EXT_MPLS_ATTR_NAMES;
u8 *
format_fib_path_ext (u8 * s, va_list * args)
@@ -38,30 +39,46 @@ format_fib_path_ext (u8 * s, va_list * args)
switch (path_ext->fpe_type)
{
- case FIB_PATH_EXT_MPLS:
- s = format(s, "labels:",
+ case FIB_PATH_EXT_MPLS: {
+ fib_path_ext_mpls_attr_t attr;
+
+ if (path_ext->fpe_mpls_flags)
+ {
+ s = format(s, "mpls-flags:[");
+
+ FOR_EACH_PATH_EXT_MPLS_ATTR(attr)
+ {
+ if ((1<<attr) & path_ext->fpe_mpls_flags) {
+ s = format(s, "%s", fib_path_ext_mpls_flags_names[attr]);
+ }
+ }
+ s = format(s, "]");
+ }
+ s = format(s, " labels:[",
path_ext->fpe_path_index);
for (ii = 0; ii < vec_len(path_ext->fpe_path.frp_label_stack); ii++)
{
- s = format(s, "%U ",
- format_mpls_unicast_label,
- path_ext->fpe_path.frp_label_stack[ii]);
+ s = format(s, "[%U]",
+ format_fib_mpls_label,
+ &path_ext->fpe_path.frp_label_stack[ii]);
}
+ s = format(s, "]");
break;
+ }
case FIB_PATH_EXT_ADJ: {
fib_path_ext_adj_attr_t attr;
- s = format(s, "adj-flags:");
if (path_ext->fpe_adj_flags)
{
+ s = format(s, "adj-flags:[");
FOR_EACH_PATH_EXT_ADJ_ATTR(attr)
{
- s = format(s, "%s", fib_path_ext_adj_flags_names[attr]);
+ if ((1<<attr) & path_ext->fpe_adj_flags)
+ {
+ s = format(s, "%s", fib_path_ext_adj_flags_names[attr]);
+ }
}
- }
- else
- {
- s = format(s, "None");
+ s = format(s, "]");
}
break;
}
@@ -121,12 +138,28 @@ fib_path_ext_init (fib_path_ext_t *path_ext,
/**
* @brief Return true if the label stack is implicit null
+ * imp-null and pop equate to the same thing at this level -
+ * the label is coming off.
*/
static int
fib_path_ext_is_imp_null (fib_path_ext_t *path_ext)
{
return ((1 == vec_len(path_ext->fpe_label_stack)) &&
- (MPLS_IETF_IMPLICIT_NULL_LABEL == path_ext->fpe_label_stack[0]));
+ ((MPLS_IETF_IMPLICIT_NULL_LABEL == path_ext->fpe_label_stack[0].fml_value) ||
+ (MPLS_LABEL_POP == path_ext->fpe_label_stack[0].fml_value)));
+}
+
+mpls_label_dpo_flags_t
+fib_path_ext_mpls_flags_to_mpls_label (fib_path_ext_mpls_flags_t fpe_flags)
+{
+ mpls_label_dpo_flags_t ml_flags = MPLS_LABEL_DPO_FLAG_NONE;
+
+ if (fpe_flags &FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR)
+ {
+ ml_flags |= MPLS_LABEL_DPO_FLAG_NO_IP_TTL_DECR;
+ }
+
+ return (ml_flags);
}
load_balance_path_t *
@@ -236,24 +269,25 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
* we pickup the correct MPLS imposition nodes to do
* ip[46] processing.
*/
+ dpo_id_t parent = DPO_INVALID;
dpo_proto_t chain_proto;
mpls_eos_bit_t eos;
- index_t mldi;
eos = (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS ?
MPLS_NON_EOS :
MPLS_EOS);
chain_proto = fib_forw_chain_type_to_dpo_proto(child_fct);
- mldi = mpls_label_dpo_create(path_ext->fpe_label_stack,
- eos, 255, 0,
- chain_proto,
- &nh->path_dpo);
+ dpo_copy(&parent, &nh->path_dpo);
+ mpls_label_dpo_create(path_ext->fpe_label_stack,
+ eos,
+ chain_proto,
+ fib_path_ext_mpls_flags_to_mpls_label(
+ path_ext->fpe_mpls_flags),
+ &parent,
+ &nh->path_dpo);
- dpo_set(&nh->path_dpo,
- DPO_MPLS_LABEL,
- chain_proto,
- mldi);
+ dpo_reset(&parent);
}
else if (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_EOS)
{
@@ -262,6 +296,7 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
*/
fib_path_stack_mpls_disp(nh->path_index,
fib_forw_chain_type_to_dpo_proto(parent_fct),
+ path_ext->fpe_label_stack[0].fml_mode,
&nh->path_dpo);
}
}
diff --git a/src/vnet/fib/fib_path_ext.h b/src/vnet/fib/fib_path_ext.h
index d07941c108b..b49fd977a20 100644
--- a/src/vnet/fib/fib_path_ext.h
+++ b/src/vnet/fib/fib_path_ext.h
@@ -60,6 +60,32 @@ typedef enum fib_path_ext_adj_flags_t_
_item++)
/**
+ * Flags present on an MPLS label sourced path-extension
+ */
+typedef enum fib_path_ext_mpls_attr_t_
+{
+ /**
+ * Do not decrement the TTL of IP packet during imposition
+ */
+ FIB_PATH_EXT_MPLS_ATTR_NO_IP_TTL_DECR,
+} fib_path_ext_mpls_attr_t;
+
+typedef enum fib_path_ext_mpls_flags_t_
+{
+ FIB_PATH_EXT_MPLS_FLAG_NONE = 0,
+ FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR = (1 << FIB_PATH_EXT_MPLS_ATTR_NO_IP_TTL_DECR),
+} fib_path_ext_mpls_flags_t;
+
+#define FIB_PATH_EXT_MPLS_ATTR_NAMES { \
+ [FIB_PATH_EXT_MPLS_ATTR_NO_IP_TTL_DECR] = "no-ip-tll-decr", \
+}
+
+#define FOR_EACH_PATH_EXT_MPLS_ATTR(_item) \
+ for (_item = FIB_PATH_EXT_MPLS_ATTR_NO_IP_TTL_DECR; \
+ _item <= FIB_PATH_EXT_MPLS_ATTR_NO_IP_TTL_DECR; \
+ _item++)
+
+/**
* A path extension is a per-entry addition to the forwarding information
* when packets are sent for that entry over that path.
*
@@ -86,6 +112,12 @@ typedef struct fib_path_ext_t_
* Flags describing the adj state
*/
fib_path_ext_adj_flags_t fpe_adj_flags;
+ /**
+ * For an MPLS type extension
+ *
+ * Flags describing the mpls state
+ */
+ fib_path_ext_mpls_flags_t fpe_mpls_flags;
};
/**
@@ -98,7 +130,7 @@ typedef struct fib_path_ext_t_
* position in the path-list.
*/
fib_node_index_t fpe_path_index;
-} __attribute__ ((packed)) fib_path_ext_t;
+} __attribute__ ((packed)) fib_path_ext_t;
extern u8 * format_fib_path_ext(u8 * s, va_list * args);
diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c
index e9173484dec..324a35fe1e8 100644
--- a/src/vnet/fib/fib_table.c
+++ b/src/vnet/fib/fib_table.c
@@ -520,7 +520,7 @@ fib_table_entry_path_add (u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_labels,
+ fib_mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_route_path_t path = {
@@ -770,7 +770,7 @@ fib_table_entry_update_one_path (u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_labels,
+ fib_mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_node_index_t fib_entry_index;
diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h
index ddc00e537c2..ffad3c43d3e 100644
--- a/src/vnet/fib/fib_table.h
+++ b/src/vnet/fib/fib_table.h
@@ -338,7 +338,7 @@ extern fib_node_index_t fib_table_entry_path_add(u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_label_stack,
+ fib_mpls_label_t *next_hop_label_stack,
fib_route_path_flags_t pf);
/**
* @brief
@@ -521,7 +521,7 @@ extern fib_node_index_t fib_table_entry_update_one_path(u32 fib_index,
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_label_stack,
+ fib_mpls_label_t *next_hop_label_stack,
fib_route_path_flags_t pf);
/**
diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c
index 2a30b3cd4cf..8f7bba0369a 100644
--- a/src/vnet/fib/fib_test.c
+++ b/src/vnet/fib/fib_test.c
@@ -301,7 +301,8 @@ fib_test_validate_rep_v (const replicate_t *rep,
{
const mpls_label_dpo_t *mld;
mpls_label_t hdr;
- FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ FIB_TEST_LB((mpls_label_dpo_get_type(MPLS_LABEL_DPO_FLAG_NONE)
+ == dpo->dpoi_type),
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
@@ -375,14 +376,19 @@ fib_test_validate_lb_v (const load_balance_t *lb,
case FT_LB_LABEL_STACK_O_ADJ:
{
const mpls_label_dpo_t *mld;
+ mpls_label_dpo_flags_t mf;
mpls_label_t hdr;
u32 ii;
- FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ mf = ((exp->label_stack_o_adj.mode ==
+ FIB_MPLS_LSP_MODE_UNIFORM) ?
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE :
+ MPLS_LABEL_DPO_FLAG_NONE);
+ FIB_TEST_LB((mpls_label_dpo_get_type(mf) == dpo->dpoi_type),
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
-
+
mld = mpls_label_dpo_get(dpo->dpoi_index);
FIB_TEST_LB(exp->label_stack_o_adj.label_stack_size == mld->mld_n_labels,
@@ -433,7 +439,8 @@ fib_test_validate_lb_v (const load_balance_t *lb,
{
const mpls_label_dpo_t *mld;
mpls_label_t hdr;
- FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ FIB_TEST_LB((mpls_label_dpo_get_type(MPLS_LABEL_DPO_FLAG_NONE)
+ == dpo->dpoi_type),
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
@@ -468,13 +475,18 @@ fib_test_validate_lb_v (const load_balance_t *lb,
case FT_LB_LABEL_O_LB:
{
const mpls_label_dpo_t *mld;
+ mpls_label_dpo_flags_t mf;
mpls_label_t hdr;
- FIB_TEST_LB((DPO_MPLS_LABEL == dpo->dpoi_type),
+ mf = ((exp->label_o_lb.mode ==
+ FIB_MPLS_LSP_MODE_UNIFORM) ?
+ MPLS_LABEL_DPO_FLAG_UNIFORM_MODE :
+ MPLS_LABEL_DPO_FLAG_NONE);
+ FIB_TEST_LB((mpls_label_dpo_get_type(mf) == dpo->dpoi_type),
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
-
+
mld = mpls_label_dpo_get(dpo->dpoi_index);
hdr = clib_net_to_host_u32(mld->mld_hdr[0].label_exp_s_ttl);
@@ -515,15 +527,15 @@ fib_test_validate_lb_v (const load_balance_t *lb,
bucket,
exp->adj.adj);
break;
- case FT_LB_MPLS_DISP_O_ADJ:
+ case FT_LB_MPLS_DISP_PIPE_O_ADJ:
{
const mpls_disp_dpo_t *mdd;
- FIB_TEST_I((DPO_MPLS_DISPOSITION == dpo->dpoi_type),
+ FIB_TEST_I((DPO_MPLS_DISPOSITION_PIPE == dpo->dpoi_type),
"bucket %d stacks on %U",
bucket,
format_dpo_type, dpo->dpoi_type);
-
+
mdd = mpls_disp_dpo_get(dpo->dpoi_index);
dpo = &mdd->mdd_dpo;
@@ -6332,8 +6344,10 @@ fib_test_label (void)
.eos = MPLS_NON_EOS,
},
};
- mpls_label_t *l99 = NULL;
- vec_add1(l99, 99);
+ fib_mpls_label_t *l99 = NULL, fml99 = {
+ .fml_value = 99,
+ };
+ vec_add1(l99, fml99);
fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_1_s_32,
@@ -6371,8 +6385,10 @@ fib_test_label (void)
.adj = ai_mpls_10_10_11_1,
},
};
- mpls_label_t *l_imp_null = NULL;
- vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
+ fib_mpls_label_t *l_imp_null = NULL, fml_imp_null = {
+ .fml_value = MPLS_IETF_IMPLICIT_NULL_LABEL,
+ };
+ vec_add1(l_imp_null, fml_imp_null);
fei = fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
@@ -6413,7 +6429,7 @@ fib_test_label (void)
.fp_eos = MPLS_NON_EOS,
};
fib_test_lb_bucket_t disp_o_10_10_11_1 = {
- .type = FT_LB_MPLS_DISP_O_ADJ,
+ .type = FT_LB_MPLS_DISP_PIPE_O_ADJ,
.adj = {
.adj = ai_v4_10_10_11_1,
},
@@ -6458,7 +6474,7 @@ fib_test_label (void)
},
};
fib_test_lb_bucket_t disp_o_10_10_11_2 = {
- .type = FT_LB_MPLS_DISP_O_ADJ,
+ .type = FT_LB_MPLS_DISP_PIPE_O_ADJ,
.adj = {
.adj = ai_v4_10_10_11_2,
},
@@ -6540,24 +6556,27 @@ fib_test_label (void)
.lb = non_eos_1_1_1_1.dpoi_index,
.label = 1600,
.eos = MPLS_EOS,
+ .mode = FIB_MPLS_LSP_MODE_UNIFORM,
},
};
- mpls_label_t *l1600 = NULL;
- vec_add1(l1600, 1600);
+ fib_mpls_label_t *l1600 = NULL, fml1600 = {
+ .fml_value = 1600,
+ .fml_mode = FIB_MPLS_LSP_MODE_UNIFORM,
+ };
+ vec_add1(l1600, fml1600);
- fib_table_entry_update_one_path(fib_index,
- &pfx_2_2_2_2_s_32,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_NONE,
- DPO_PROTO_IP4,
- &pfx_1_1_1_1_s_32.fp_addr,
- ~0,
- fib_index,
- 1,
- l1600,
- FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_update_one_path(fib_index,
+ &pfx_2_2_2_2_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ DPO_PROTO_IP4,
+ &pfx_1_1_1_1_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ l1600,
+ FIB_ROUTE_PATH_FLAG_NONE);
- fei = fib_table_lookup(fib_index, &pfx_2_2_2_2_s_32);
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
1,
@@ -6811,7 +6830,7 @@ fib_test_label (void)
* add back the path with the valid label
*/
l99 = NULL;
- vec_add1(l99, 99);
+ vec_add1(l99, fml99);
fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
@@ -6941,8 +6960,10 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
- mpls_label_t *l101 = NULL;
- vec_add1(l101, 101);
+ fib_mpls_label_t *l101 = NULL, fml101 = {
+ .fml_value = 101,
+ };
+ vec_add1(l101, fml101);
fei = fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_2_s_32,
@@ -6981,8 +7002,10 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
- mpls_label_t *l1601 = NULL;
- vec_add1(l1601, 1601);
+ fib_mpls_label_t *l1601 = NULL, fml1601 = {
+ .fml_value = 1601,
+ };
+ vec_add1(l1601, fml1601);
l1600_eos_o_1_1_1_1.label_o_lb.lb = non_eos_1_1_1_1.dpoi_index;
@@ -7012,7 +7035,7 @@ fib_test_label (void)
* the LB for the recursive can use an imp-null
*/
l_imp_null = NULL;
- vec_add1(l_imp_null, MPLS_IETF_IMPLICIT_NULL_LABEL);
+ vec_add1(l_imp_null, fml_imp_null);
fei = fib_table_entry_update_one_path(fib_index,
&pfx_1_1_1_2_s_32,
@@ -7176,11 +7199,11 @@ fib_test_label (void)
.eos = MPLS_EOS,
},
};
- mpls_label_t *label_stack = NULL;
+ fib_mpls_label_t *label_stack = NULL;
vec_validate(label_stack, 7);
for (ii = 0; ii < 8; ii++)
{
- label_stack[ii] = ii + 200;
+ label_stack[ii].fml_value = ii + 200;
}
fei = fib_table_entry_update_one_path(fib_index,
@@ -8255,12 +8278,13 @@ static int
lfib_test (void)
{
const mpls_label_t deag_label = 50;
+ dpo_id_t dpo = DPO_INVALID;
+ const mpls_disp_dpo_t *mdd;
const u32 lfib_index = 0;
const u32 fib_index = 0;
- dpo_id_t dpo = DPO_INVALID;
+ const lookup_dpo_t *lkd;
const dpo_id_t *dpo1;
fib_node_index_t lfe;
- lookup_dpo_t *lkd;
test_main_t *tm;
int lb_count;
adj_index_t ai_mpls_10_10_10_1;
@@ -8327,7 +8351,6 @@ lfib_test (void)
format_mpls_eos_bit, MPLS_EOS,
format_dpo_proto, lkd->lkd_proto);
-
/*
* A route deag route for EOS
*/
@@ -8358,7 +8381,14 @@ lfib_test (void)
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
&dpo);
dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
- lkd = lookup_dpo_get(dpo1->dpoi_index);
+ mdd = mpls_disp_dpo_get(dpo1->dpoi_index);
+
+ FIB_TEST((FIB_MPLS_LSP_MODE_PIPE == mdd->mdd_mode),
+ "%U/%U disp is pipe mode",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+
+ lkd = lookup_dpo_get(mdd->mdd_dpo.dpoi_index);
FIB_TEST((fib_index == lkd->lkd_fib_index),
"%U/%U is deag in %d %U",
@@ -8383,6 +8413,70 @@ lfib_test (void)
"%U/%U not present",
format_mpls_unicast_label, deag_label,
format_mpls_eos_bit, MPLS_EOS);
+ dpo_reset(&dpo);
+
+ /*
+ * A route deag route for EOS with LSP mode uniform
+ */
+ fib_mpls_label_t *l_pops = NULL, l_pop = {
+ .fml_value = MPLS_LABEL_POP,
+ .fml_mode = FIB_MPLS_LSP_MODE_UNIFORM,
+ };
+ vec_add1(l_pops, l_pop);
+ lfe = fib_table_entry_path_add(lfib_index,
+ &pfx,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE,
+ DPO_PROTO_IP4,
+ &zero_addr,
+ ~0,
+ fib_index,
+ 1,
+ l_pops,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST((lfe == fib_table_lookup(lfib_index, &pfx)),
+ "%U/%U present",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+
+ fib_entry_contribute_forwarding(lfe,
+ FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ &dpo);
+ dpo1 = load_balance_get_bucket(dpo.dpoi_index, 0);
+ mdd = mpls_disp_dpo_get(dpo1->dpoi_index);
+
+ FIB_TEST((FIB_MPLS_LSP_MODE_UNIFORM == mdd->mdd_mode),
+ "%U/%U disp is uniform mode",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+
+ lkd = lookup_dpo_get(mdd->mdd_dpo.dpoi_index);
+
+ FIB_TEST((fib_index == lkd->lkd_fib_index),
+ "%U/%U is deag in %d %U",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ lkd->lkd_fib_index,
+ format_dpo_id, &dpo, 0);
+ FIB_TEST((LOOKUP_INPUT_DST_ADDR == lkd->lkd_input),
+ "%U/%U is dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+ FIB_TEST((DPO_PROTO_IP4 == lkd->lkd_proto),
+ "%U/%U is %U dst deag",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS,
+ format_dpo_proto, lkd->lkd_proto);
+
+ fib_table_entry_delete_index(lfe, FIB_SOURCE_CLI);
+
+ FIB_TEST((FIB_NODE_INDEX_INVALID == fib_table_lookup(lfib_index,
+ &pfx)),
+ "%U/%U not present",
+ format_mpls_unicast_label, deag_label,
+ format_mpls_eos_bit, MPLS_EOS);
+ dpo_reset(&dpo);
/*
* A route deag route for non-EOS
@@ -8460,11 +8554,15 @@ lfib_test (void)
};
dpo_id_t neos_1200 = DPO_INVALID;
dpo_id_t ip_1200 = DPO_INVALID;
- mpls_label_t *l200 = NULL;
- vec_add1(l200, 200);
- vec_add1(l200, 300);
- vec_add1(l200, 400);
- vec_add1(l200, 500);
+ fib_mpls_label_t *l200 = NULL;
+ u32 ii;
+ for (ii = 0; ii < 4; ii++)
+ {
+ fib_mpls_label_t fml = {
+ .fml_value = 200 + (ii * 100),
+ };
+ vec_add1(l200, fml);
+ };
lfe = fib_table_entry_update_one_path(fib_index,
&pfx_1200,
@@ -8523,8 +8621,10 @@ lfib_test (void)
.ip4.as_u32 = clib_host_to_net_u32(0x02020204),
},
};
- mpls_label_t *l999 = NULL;
- vec_add1(l999, 999);
+ fib_mpls_label_t *l999 = NULL, fml_999 = {
+ .fml_value = 999,
+ };
+ vec_add1(l999, fml_999);
rpaths[0].frp_label_stack = l999,
fib_table_entry_path_add2(fib_index,
@@ -8690,8 +8790,10 @@ lfib_test (void)
.adj = idpo.dpoi_index,
},
};
- mpls_label_t *l3300 = NULL;
- vec_add1(l3300, 3300);
+ fib_mpls_label_t *l3300 = NULL, fml_3300 = {
+ .fml_value = 3300,
+ };
+ vec_add1(l3300, fml_3300);
lfe = fib_table_entry_update_one_path(lfib_index,
&pfx_3500,
@@ -8772,6 +8874,9 @@ lfib_test (void)
0, 1);
mpls_table_delete(MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_API);
+ FIB_TEST(0 == pool_elts(mpls_disp_dpo_pool),
+ "mpls_disp_dpo resources freed %d of %d",
+ 0, pool_elts(mpls_disp_dpo_pool));
FIB_TEST(lb_count == pool_elts(load_balance_pool),
"Load-balance resources freed %d of %d",
lb_count, pool_elts(load_balance_pool));
diff --git a/src/vnet/fib/fib_test.h b/src/vnet/fib/fib_test.h
index 0309b3f2311..8ac068350c4 100644
--- a/src/vnet/fib/fib_test.h
+++ b/src/vnet/fib/fib_test.h
@@ -29,7 +29,7 @@ typedef enum fib_test_lb_bucket_type_t_ {
FT_LB_LABEL_STACK_O_ADJ,
FT_LB_LABEL_O_LB,
FT_LB_O_LB,
- FT_LB_MPLS_DISP_O_ADJ,
+ FT_LB_MPLS_DISP_PIPE_O_ADJ,
FT_LB_INTF,
FT_LB_L2,
FT_LB_BIER_TABLE,
@@ -47,6 +47,7 @@ typedef struct fib_test_lb_bucket_t_ {
{
mpls_eos_bit_t eos;
mpls_label_t label;
+ fib_mpls_lsp_mode_t mode;
u8 ttl;
adj_index_t adj;
} label_o_adj;
@@ -54,6 +55,7 @@ typedef struct fib_test_lb_bucket_t_ {
{
mpls_eos_bit_t eos;
mpls_label_t label_stack[8];
+ fib_mpls_lsp_mode_t mode;
u8 label_stack_size;
u8 ttl;
adj_index_t adj;
@@ -62,6 +64,7 @@ typedef struct fib_test_lb_bucket_t_ {
{
mpls_eos_bit_t eos;
mpls_label_t label;
+ fib_mpls_lsp_mode_t mode;
u8 ttl;
index_t lb;
} label_o_lb;
diff --git a/src/vnet/fib/fib_types.api b/src/vnet/fib/fib_types.api
index 61a0898819d..fde2c337190 100644
--- a/src/vnet/fib/fib_types.api
+++ b/src/vnet/fib/fib_types.api
@@ -12,6 +12,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/** \brief MPLS label
+*/
+typeonly define fib_mpls_label
+{
+ u8 is_uniform;
+ u32 label;
+ u8 ttl;
+ u8 exp;
+};
/** \brief FIB path
@param sw_if_index - index of the interface
@@ -20,6 +30,8 @@
is prefered
@param is_local - local if non-zero, else remote
@param is_drop - Drop the packet
+ @param is_unreach - Drop the packet and rate limit send ICMP unreachable
+ @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
@param is_udp_encap - The path describes a UDP-o-IP encapsulation.
@param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2
@param next_hop[16] - the next hop address
@@ -27,7 +39,7 @@
that has a unique identifier. e.g. the UDP
encap object
*/
-typeonly define fib_path3
+typeonly define fib_path
{
u32 sw_if_index;
u32 table_id;
@@ -36,10 +48,12 @@ typeonly define fib_path3
u8 is_local;
u8 is_drop;
u8 is_udp_encap;
+ u8 is_unreach;
+ u8 is_prohibit;
u8 afi;
u8 next_hop[16];
u32 next_hop_id;
u32 rpf_id;
u8 n_labels;
- u32 label_stack[16];
+ vl_api_fib_mpls_label_t label_stack[16];
};
diff --git a/src/vnet/fib/fib_types.c b/src/vnet/fib/fib_types.c
index f38c8154e0c..8b1faf5fa86 100644
--- a/src/vnet/fib/fib_types.c
+++ b/src/vnet/fib/fib_types.c
@@ -25,6 +25,7 @@
static const char* fib_protocol_names[] = FIB_PROTOCOLS;
static const char* vnet_link_names[] = VNET_LINKS;
static const char* fib_forw_chain_names[] = FIB_FORW_CHAINS;
+static const char* fib_mpls_lsp_mode_names[] = FIB_MPLS_LSP_MODES;
u8 *
format_fib_protocol (u8 * s, va_list * ap)
@@ -50,6 +51,30 @@ format_fib_forw_chain_type (u8 * s, va_list * args)
return (format (s, "%s", fib_forw_chain_names[fct]));
}
+u8 *
+format_fib_mpls_lsp_mode(u8 *s, va_list *ap)
+{
+ fib_mpls_lsp_mode_t mode = va_arg(*ap, int);
+
+ return (format (s, "%s", fib_mpls_lsp_mode_names[mode]));
+}
+
+u8 *
+format_fib_mpls_label (u8 *s, va_list *ap)
+{
+ fib_mpls_label_t *label = va_arg(*ap, fib_mpls_label_t *);
+
+ s = format(s, "%U %U ttl:%d exp:%d",
+ format_mpls_unicast_label,
+ label->fml_value,
+ format_fib_mpls_lsp_mode,
+ label->fml_mode,
+ label->fml_ttl,
+ label->fml_exp);
+
+ return (s);
+}
+
void
fib_prefix_from_ip46_addr (const ip46_address_t *addr,
fib_prefix_t *pfx)
@@ -307,6 +332,29 @@ fib_forw_chain_type_to_link_type (fib_forward_chain_type_t fct)
return (VNET_LINK_IP4);
}
+fib_forward_chain_type_t
+fib_forw_chain_type_from_link_type (vnet_link_t link_type)
+{
+ switch (link_type)
+ {
+ case VNET_LINK_IP4:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+ case VNET_LINK_IP6:
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
+ case VNET_LINK_MPLS:
+ return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
+ case VNET_LINK_ETHERNET:
+ return (FIB_FORW_CHAIN_TYPE_ETHERNET);
+ case VNET_LINK_NSH:
+ return (FIB_FORW_CHAIN_TYPE_NSH);
+ case VNET_LINK_ARP:
+ break;
+ }
+
+ ASSERT(0);
+ return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+}
+
dpo_proto_t
fib_forw_chain_type_to_dpo_proto (fib_forward_chain_type_t fct)
{
@@ -475,7 +523,10 @@ unformat_fib_route_path (unformat_input_t * input, va_list * args)
while (unformat (input, "%U",
unformat_mpls_unicast_label, &out_label))
{
- vec_add1(rpath->frp_label_stack, out_label);
+ fib_mpls_label_t fml = {
+ .fml_value = out_label,
+ };
+ vec_add1(rpath->frp_label_stack, fml);
}
}
else if (unformat (input, "%U",
diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h
index 75ed7799f12..7f186ac6f6a 100644
--- a/src/vnet/fib/fib_types.h
+++ b/src/vnet/fib/fib_types.h
@@ -163,11 +163,16 @@ typedef enum fib_forward_chain_type_t_ {
_item++)
/**
- * @brief Convert from a chain type to the adjacencies link type
+ * @brief Convert from a chain type to the adjacency's link type
*/
extern vnet_link_t fib_forw_chain_type_to_link_type(fib_forward_chain_type_t fct);
/**
+ * @brief Convert from a adjacency's link type to chain type
+ */
+extern fib_forward_chain_type_t fib_forw_chain_type_from_link_type(vnet_link_t lt);
+
+/**
* @brief Convert from a payload-protocol to a chain type.
*/
extern fib_forward_chain_type_t fib_forw_chain_type_from_dpo_proto(dpo_proto_t proto);
@@ -373,6 +378,64 @@ typedef u32 fib_rpf_id_t;
#define MFIB_RPF_ID_NONE (0)
/**
+ * MPLS LSP mode - only valid at the head and tail
+ */
+/* packed: the enum occupies a single byte, keeping fib_mpls_label_t compact */
+typedef enum fib_mpls_lsp_mode_t_
+{
+ /**
+ * Pipe Mode - the default.
+ * TTL and DSCP markings are not carried between the layers
+ */
+ FIB_MPLS_LSP_MODE_PIPE,
+ /**
+ * Uniform mode.
+ * TTL and DSCP are copied between the layers
+ */
+ FIB_MPLS_LSP_MODE_UNIFORM,
+} __attribute__((packed)) fib_mpls_lsp_mode_t;
+
+/**
+ * Designated-initializer name table, indexed by fib_mpls_lsp_mode_t.
+ * Keep in sync with the enum above.
+ */
+#define FIB_MPLS_LSP_MODES { \
+ [FIB_MPLS_LSP_MODE_PIPE] = "pipe", \
+ [FIB_MPLS_LSP_MODE_UNIFORM] = "uniform", \
+}
+
+/**
+ * Format an LSP mode type
+ */
+extern u8 * format_fib_mpls_lsp_mode(u8 *s, va_list *ap);
+
+/**
+ * Configuration for each label value in the output-stack
+ */
+typedef struct fib_mpls_label_t_
+{
+ /**
+ * The label value, in host byte order (API handlers ntohl() the
+ * wire value before storing it here).
+ */
+ mpls_label_t fml_value;
+
+ /**
+ * The LSP mode (pipe/uniform); only meaningful at the LSP head and tail.
+ */
+ fib_mpls_lsp_mode_t fml_mode;
+
+ /**
+ * TTL. valid only at imposition.
+ */
+ u8 fml_ttl;
+
+ /**
+ * EXP bits; valid only at imposition.
+ */
+ u8 fml_exp;
+} fib_mpls_label_t;
+
+/**
+ * Format an MPLS label
+ */
+extern u8 * format_fib_mpls_label(u8 *s, va_list *ap);
+
+/**
* @brief
* A representation of a path as described by a route producer.
* These paramenters will determine the path 'type', of which there are:
@@ -444,7 +507,7 @@ typedef struct fib_route_path_t_ {
/**
* The outgoing MPLS label Stack. NULL implies no label.
*/
- mpls_label_t *frp_label_stack;
+ fib_mpls_label_t *frp_label_stack;
};
/**
* A path that resolves via a BIER Table.
diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api
index b94d6d748b2..282f531e4fb 100644
--- a/src/vnet/ip/ip.api
+++ b/src/vnet/ip/ip.api
@@ -19,7 +19,8 @@
called through a shared memory interface.
*/
-option version = "1.0.1";
+option version = "1.1.0";
+import "vnet/fib/fib_types.api";
/** \brief Add / del table request
A table can be added multiple times, but need be deleted only once.
@@ -52,33 +53,6 @@ define ip_fib_dump
u32 context;
};
-/** \brief FIB path
- @param sw_if_index - index of the interface
- @param weight - The weight, for UCMP
- @param preference - The preference of the path. lowest preference is prefered
- @param is_local - local if non-zero, else remote
- @param is_drop - Drop the packet
- @param is_unreach - Drop the packet and rate limit send ICMP unreachable
- @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
- @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2
- @param next_hop[16] - the next hop address
-
- WARNING: this type is replicated, pending cleanup completion
-*/
-typeonly manual_print manual_endian define fib_path
-{
- u32 sw_if_index;
- u32 table_id;
- u8 weight;
- u8 preference;
- u8 is_local;
- u8 is_drop;
- u8 is_unreach;
- u8 is_prohibit;
- u8 afi;
- u8 next_hop[16];
-};
-
/** \brief IP FIB table response
@param table_id - IP fib table id
@address_length - mask length
@@ -420,7 +394,7 @@ autoreply define ip_add_del_route
u8 next_hop_address[16];
u8 next_hop_n_out_labels;
u32 next_hop_via_label;
- u32 next_hop_out_label_stack[next_hop_n_out_labels];
+ vl_api_fib_mpls_label_t next_hop_out_label_stack[next_hop_n_out_labels];
};
/** \brief Add / del route request
diff --git a/src/vnet/ip/ip6_packet.h b/src/vnet/ip/ip6_packet.h
index 76e3c1f93f0..a02f8b2ea8e 100644
--- a/src/vnet/ip/ip6_packet.h
+++ b/src/vnet/ip/ip6_packet.h
@@ -342,11 +342,27 @@ typedef struct
} ip6_header_t;
always_inline u8
-ip6_traffic_class (ip6_header_t * i)
+ip6_traffic_class (const ip6_header_t * i)
{
return (i->ip_version_traffic_class_and_flow_label & 0x0FF00000) >> 20;
}
+static_always_inline u8
+ip6_traffic_class_network_order (const ip6_header_t * ip6)
+{
+ return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label)
+ & 0x0ff00000) >> 20;
+}
+
+/**
+ * Overwrite the IPv6 traffic-class field (bits 27:20 of the first
+ * header word) with the given value, preserving the version and
+ * flow-label bits. The header word is in network byte order.
+ */
+static_always_inline void
+ip6_set_traffic_class_network_order (ip6_header_t * ip6, u8 dscp)
+{
+  u32 tmp =
+    clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label);
+  /* clear the existing traffic-class bits first; OR-ing alone cannot
+   * reset bits already set in the packet's header, so a nonzero TC
+   * could never be overwritten */
+  tmp &= 0xf00fffff;
+  tmp |= (dscp << 20);
+  ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (tmp);
+}
+
always_inline void *
ip6_next_header (ip6_header_t * i)
{
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
index 60fa2faf926..726d24ca5a8 100644
--- a/src/vnet/ip/ip_api.c
+++ b/src/vnet/ip/ip_api.c
@@ -867,7 +867,7 @@ add_del_route_t_handler (u8 is_multipath,
u16 next_hop_weight,
u16 next_hop_preference,
mpls_label_t next_hop_via_label,
- mpls_label_t * next_hop_out_label_stack)
+ fib_mpls_label_t * next_hop_out_label_stack)
{
vnet_classify_main_t *cm = &vnet_classify_main;
fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE;
@@ -1071,7 +1071,7 @@ static int
ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
{
u32 fib_index, next_hop_fib_index;
- mpls_label_t *label_stack = NULL;
+ fib_mpls_label_t *label_stack = NULL;
int rv, ii, n_labels;;
rv = add_del_route_check (FIB_PROTOCOL_IP4,
@@ -1097,13 +1097,19 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
n_labels = mp->next_hop_n_out_labels;
if (n_labels == 0)
;
- else if (1 == n_labels)
- vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
else
{
vec_validate (label_stack, n_labels - 1);
for (ii = 0; ii < n_labels; ii++)
- label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ {
+ label_stack[ii].fml_value =
+ ntohl (mp->next_hop_out_label_stack[ii].label);
+ label_stack[ii].fml_ttl = mp->next_hop_out_label_stack[ii].ttl;
+ label_stack[ii].fml_exp = mp->next_hop_out_label_stack[ii].exp;
+ label_stack[ii].fml_mode =
+ (mp->next_hop_out_label_stack[ii].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
+ }
}
return (add_del_route_t_handler (mp->is_multipath,
@@ -1133,8 +1139,8 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
static int
ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
{
+ fib_mpls_label_t *label_stack = NULL;
u32 fib_index, next_hop_fib_index;
- mpls_label_t *label_stack = NULL;
int rv, ii, n_labels;;
rv = add_del_route_check (FIB_PROTOCOL_IP6,
@@ -1160,13 +1166,19 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp)
n_labels = mp->next_hop_n_out_labels;
if (n_labels == 0)
;
- else if (1 == n_labels)
- vec_add1 (label_stack, ntohl (mp->next_hop_out_label_stack[0]));
else
{
vec_validate (label_stack, n_labels - 1);
for (ii = 0; ii < n_labels; ii++)
- label_stack[ii] = ntohl (mp->next_hop_out_label_stack[ii]);
+ {
+ label_stack[ii].fml_value =
+ ntohl (mp->next_hop_out_label_stack[ii].label);
+ label_stack[ii].fml_ttl = mp->next_hop_out_label_stack[ii].ttl;
+ label_stack[ii].fml_exp = mp->next_hop_out_label_stack[ii].exp;
+ label_stack[ii].fml_mode =
+ (mp->next_hop_out_label_stack[ii].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
+ }
}
return (add_del_route_t_handler (mp->is_multipath,
diff --git a/src/vnet/mfib/mfib_test.c b/src/vnet/mfib/mfib_test.c
index 2cb663a5eb6..f60d428cfcf 100644
--- a/src/vnet/mfib/mfib_test.c
+++ b/src/vnet/mfib/mfib_test.c
@@ -1108,8 +1108,10 @@ mfib_test_i (fib_protocol_t PROTO,
.eos = MPLS_EOS,
},
};
- mpls_label_t *l3300 = NULL;
- vec_add1(l3300, 3300);
+ fib_mpls_label_t *l3300 = NULL, fml3300 = {
+ .fml_value = 3300,
+ };
+ vec_add1(l3300, fml3300);
/*
* MPLS enable an interface so we get the MPLS table created
diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api
index 572ac915a4a..6047d255aa7 100644
--- a/src/vnet/mpls/mpls.api
+++ b/src/vnet/mpls/mpls.api
@@ -13,7 +13,8 @@
* limitations under the License.
*/
-option version = "1.0.1";
+option version = "1.1.0";
+import "vnet/fib/fib_types.api";
/** \brief Bind/Unbind an MPLS local label to an IP prefix. i.e. create
a per-prefix label entry.
@@ -40,21 +41,6 @@ autoreply define mpls_ip_bind_unbind
u8 mb_address[16];
};
-/** \brief MPLS tunnel Add / del route
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param mt_is_add - Is this a route add or delete
- @param mt_sw_if_index - The SW interface index of the tunnel to delete
- @param mt_is_multicast - Is the tunnel's underlying LSP multicast
- @param mt_next_hop_proto_is_ip4 - The next-hop is IPV4
- @param mt_next_hop_weight - The weight, for UCMP
- @param mt_next_hop_preference - The preference
- @param mt_next_hop[16] - the nextop address
- @param mt_next_hop_sw_if_index - the next-hop SW interface
- @param mt_next_hop_table_id - the next-hop table-id (if appropriate)
- @param mt_next_hop_n_out_labels - the number of next-hop output labels
- @param mt_next_hop_out_label_stack - the next-hop output label stack, outer most first
-*/
define mpls_tunnel_add_del
{
u32 client_index;
@@ -70,7 +56,7 @@ define mpls_tunnel_add_del
u8 mt_next_hop_n_out_labels;
u32 mt_next_hop_sw_if_index;
u32 mt_next_hop_table_id;
- u32 mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
+ vl_api_fib_mpls_label_t mt_next_hop_out_label_stack[mt_next_hop_n_out_labels];
};
/** \brief Reply for MPLS tunnel add / del request
@@ -96,34 +82,6 @@ define mpls_tunnel_dump
i32 tunnel_index;
};
-/** \brief FIB path
- @param sw_if_index - index of the interface
- @param weight - The weight, for UCMP
- @param is_local - local if non-zero, else remote
- @param is_drop - Drop the packet
- @param is_unreach - Drop the packet and rate limit send ICMP unreachable
- @param is_prohibit - Drop the packet and rate limit send ICMP prohibited
- @param afi - the afi of the next hop, IP46_TYPE_IP4=1, IP46_TYPE_IP6=2
- @param next_hop[16] - the next hop address
-
- WARNING: this type is replicated, pending cleanup completion
-
-*/
-typeonly manual_print manual_endian define fib_path2
-{
- u32 sw_if_index;
- u32 table_id;
- u8 weight;
- u8 preference;
- u8 is_local;
- u8 is_drop;
- u8 is_unreach;
- u8 is_prohibit;
- u8 afi;
- u8 next_hop[16];
- u32 labels[16];
-};
-
/** \brief mpls tunnel details
*/
manual_endian manual_print define mpls_tunnel_details
@@ -134,7 +92,7 @@ manual_endian manual_print define mpls_tunnel_details
u8 mt_l2_only;
u8 mt_is_multicast;
u32 mt_count;
- vl_api_fib_path2_t mt_paths[mt_count];
+ vl_api_fib_path_t mt_paths[mt_count];
};
/** \brief MPLS Route Add / del route
@@ -207,7 +165,7 @@ autoreply define mpls_route_add_del
u32 mr_next_hop_sw_if_index;
u32 mr_next_hop_table_id;
u32 mr_next_hop_via_label;
- u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
+ vl_api_fib_mpls_label_t mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
};
/** \brief Dump MPLS fib table
@@ -234,7 +192,7 @@ manual_endian manual_print define mpls_fib_details
u8 eos_bit;
u32 label;
u32 count;
- vl_api_fib_path2_t path[count];
+ vl_api_fib_path_t path[count];
};
/** \brief Enable or Disable MPLS on and interface
diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c
index 25957fb3fea..be72d3f829f 100644
--- a/src/vnet/mpls/mpls.c
+++ b/src/vnet/mpls/mpls.c
@@ -47,6 +47,9 @@ u8 * format_mpls_unicast_label (u8 * s, va_list * args)
case MPLS_IETF_GAL_LABEL:
s = format (s, "%s", MPLS_IETF_GAL_STRING);
break;
+ case MPLS_LABEL_POP:
+ s = format (s, "pop");
+ break;
default:
s = format (s, "%d", label);
break;
diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c
index 36fa610e8ca..169ee406a91 100644
--- a/src/vnet/mpls/mpls_api.c
+++ b/src/vnet/mpls/mpls_api.c
@@ -170,8 +170,8 @@ static int
mpls_route_add_del_t_handler (vnet_main_t * vnm,
vl_api_mpls_route_add_del_t * mp)
{
+ fib_mpls_label_t *label_stack = NULL;
u32 fib_index, next_hop_fib_index;
- mpls_label_t *label_stack = NULL;
int rv, ii, n_labels;;
fib_prefix_t pfx = {
@@ -211,13 +211,19 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm,
n_labels = mp->mr_next_hop_n_out_labels;
if (n_labels == 0)
;
- else if (1 == n_labels)
- vec_add1 (label_stack, ntohl (mp->mr_next_hop_out_label_stack[0]));
else
{
vec_validate (label_stack, n_labels - 1);
for (ii = 0; ii < n_labels; ii++)
- label_stack[ii] = ntohl (mp->mr_next_hop_out_label_stack[ii]);
+ {
+ label_stack[ii].fml_value =
+ ntohl (mp->mr_next_hop_out_label_stack[ii].label);
+ label_stack[ii].fml_ttl = mp->mr_next_hop_out_label_stack[ii].ttl;
+ label_stack[ii].fml_exp = mp->mr_next_hop_out_label_stack[ii].exp;
+ label_stack[ii].fml_mode =
+ (mp->mr_next_hop_out_label_stack[ii].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE);
+ }
}
/* *INDENT-OFF* */
@@ -323,8 +329,16 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp)
if (mp->mt_is_add)
{
for (ii = 0; ii < mp->mt_next_hop_n_out_labels; ii++)
- vec_add1 (rpath.frp_label_stack,
- ntohl (mp->mt_next_hop_out_label_stack[ii]));
+ {
+ fib_mpls_label_t fml = {
+ .fml_value = ntohl (mp->mt_next_hop_out_label_stack[ii].label),
+ .fml_ttl = mp->mt_next_hop_out_label_stack[ii].ttl,
+ .fml_exp = mp->mt_next_hop_out_label_stack[ii].exp,
+ .fml_mode = (mp->mt_next_hop_out_label_stack[ii].is_uniform ?
+ FIB_MPLS_LSP_MODE_UNIFORM : FIB_MPLS_LSP_MODE_PIPE),
+ };
+ vec_add1 (rpath.frp_label_stack, fml);
+ }
}
vec_add1 (rpaths, rpath);
@@ -388,7 +402,7 @@ send_mpls_tunnel_entry (u32 mti, void *arg)
mpls_tunnel_send_walk_ctx_t *ctx;
vl_api_mpls_tunnel_details_t *mp;
const mpls_tunnel_t *mt;
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
u32 n;
ctx = arg;
@@ -399,8 +413,8 @@ send_mpls_tunnel_entry (u32 mti, void *arg)
mt = mpls_tunnel_get (mti);
n = fib_path_list_get_n_paths (mt->mt_path_list);
- mp = vl_msg_api_alloc (sizeof (*mp) + n * sizeof (vl_api_fib_path2_t));
- memset (mp, 0, sizeof (*mp) + n * sizeof (vl_api_fib_path2_t));
+ mp = vl_msg_api_alloc (sizeof (*mp) + n * sizeof (vl_api_fib_path_t));
+ memset (mp, 0, sizeof (*mp) + n * sizeof (vl_api_fib_path_t));
mp->_vl_msg_id = ntohs (VL_API_MPLS_TUNNEL_DETAILS);
mp->context = ctx->context;
@@ -456,7 +470,7 @@ send_mpls_fib_details (vpe_api_main_t * am,
{
vl_api_mpls_fib_details_t *mp;
fib_route_path_encode_t *api_rpath;
- vl_api_fib_path2_t *fp;
+ vl_api_fib_path_t *fp;
int path_count;
path_count = vec_len (api_rpaths);
diff --git a/src/vnet/mpls/mpls_input.c b/src/vnet/mpls/mpls_input.c
index 86ad8bba270..d1881d4050b 100644
--- a/src/vnet/mpls/mpls_input.c
+++ b/src/vnet/mpls/mpls_input.c
@@ -51,10 +51,11 @@ format_mpls_input_trace (u8 * s, va_list * args)
foreach_mpls_input_next;
#undef _
- s = format (s, "MPLS: next %s[%d] label %d ttl %d",
+ s = format (s, "MPLS: next %s[%d] label %d ttl %d exp %d",
next_name, t->next_index,
vnet_mpls_uc_get_label(label),
- vnet_mpls_uc_get_ttl(label));
+ vnet_mpls_uc_get_ttl(label),
+ vnet_mpls_uc_get_exp(label));
return s;
}
@@ -74,21 +75,13 @@ mpls_input_inline (vlib_main_t * vm,
vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
- mpls_input_runtime_t * rt;
- mpls_main_t * mm;
+ mpls_main_t * mm = &mpls_main;
u32 thread_index = vlib_get_thread_index();
vlib_simple_counter_main_t * cm;
vnet_main_t * vnm = vnet_get_main();
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
- rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
- mm = rt->mpls_main;
- /*
- * Force an initial lookup every time, in case the control-plane
- * changed the label->FIB mapping.
- */
- rt->last_label = ~0;
next_index = node->cached_next_index;
@@ -279,18 +272,11 @@ VLIB_NODE_FUNCTION_MULTIARCH (mpls_input_node, mpls_input)
static void
mpls_setup_nodes (vlib_main_t * vm)
{
- mpls_input_runtime_t * rt;
pg_node_t * pn;
pn = pg_get_node (mpls_input_node.index);
pn->unformat_edit = unformat_pg_mpls_header;
- rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
- rt->last_label = (u32) ~0;
- rt->last_inner_fib_index = 0;
- rt->last_outer_fib_index = 0;
- rt->mpls_main = &mpls_main;
-
ethernet_register_input_type (vm, ETHERNET_TYPE_MPLS,
mpls_input_node.index);
}
@@ -309,16 +295,3 @@ static clib_error_t * mpls_input_init (vlib_main_t * vm)
}
VLIB_INIT_FUNCTION (mpls_input_init);
-
-static clib_error_t * mpls_input_worker_init (vlib_main_t * vm)
-{
- mpls_input_runtime_t * rt;
- rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
- rt->last_label = (u32) ~0;
- rt->last_inner_fib_index = 0;
- rt->last_outer_fib_index = 0;
- rt->mpls_main = &mpls_main;
- return 0;
-}
-
-VLIB_WORKER_INIT_FUNCTION (mpls_input_worker_init);
diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c
index 8ed2c409ce8..c2067d81b00 100644
--- a/src/vnet/mpls/mpls_tunnel.c
+++ b/src/vnet/mpls/mpls_tunnel.c
@@ -123,6 +123,12 @@ mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts,
path_index);
+ /*
+ * we don't want IP TTL decrements for packets hitting the MPLS labels
+ * we stack on, since the IP TTL decrement is done by the adj
+ */
+ path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR;
+
if (NULL != path_ext)
{
/*
@@ -273,9 +279,8 @@ mpls_tunnel_stack (adj_index_t ai)
mpls_tunnel_mk_lb(mt,
adj->ia_link,
- (VNET_LINK_MPLS == adj_get_link_type(ai) ?
- FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
- FIB_FORW_CHAIN_TYPE_MPLS_EOS),
+ fib_forw_chain_type_from_link_type(
+ adj_get_link_type(ai)),
&dpo);
adj_nbr_midchain_stack(ai, &dpo);
@@ -521,6 +526,11 @@ mpls_tunnel_tx (vlib_main_t * vm,
b0 = vlib_get_buffer(vm, bi0);
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_lb.dpoi_index;
+ /* since we are coming out of the L2 world, where the vlib_buffer
+ * union is used for other things, make sure it is clean for
+ * MPLS from now on.
+ */
+ vnet_buffer(b0)->mpls.first = 0;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
diff --git a/src/vnet/mpls/mpls_types.h b/src/vnet/mpls/mpls_types.h
index f1c3191e00c..c21bdf1eace 100644
--- a/src/vnet/mpls/mpls_types.h
+++ b/src/vnet/mpls/mpls_types.h
@@ -47,6 +47,14 @@
#define MPLS_LABEL_INVALID (MPLS_IETF_MAX_LABEL+1)
+/**
+ * A value that is explicit about the end of the LSP. Specifying
+ * a label value is needed when the mode configuration (pipe/uniform)
+ * is also requested.
+ * imp-null implies a label swap. pop can be used for a deag.
+ */
+#define MPLS_LABEL_POP (MPLS_IETF_MAX_LABEL+2)
+
#define MPLS_LABEL_IS_REAL(_lbl) \
(((_lbl) > MPLS_IETF_MIN_UNRES_LABEL) && \
((_lbl) <= MPLS_IETF_MAX_UNRES_LABEL))
diff --git a/src/vnet/mpls/packet.h b/src/vnet/mpls/packet.h
index bc67445be89..ca6ac407686 100644
--- a/src/vnet/mpls/packet.h
+++ b/src/vnet/mpls/packet.h
@@ -42,6 +42,32 @@ typedef enum mpls_eos_bit_t_
[MPLS_EOS] = "eos", \
}
+/**
+ * The Default TTL added to MPLS label headers when no other value is available
+ */
+#define MPLS_LABEL_DEFAULT_TTL 64
+
+/**
+ * The Default EXP added to MPLS label headers when no other value is available
+ */
+#define MPLS_LABEL_DEFAULT_EXP 0
+
+/**
+ * When in uniform mode convert an IPv[46] DSCP value to an MPLS EXP value.
+ * Only the top 3 bits of the TOS/traffic-class byte (the precedence
+ * bits) survive the conversion, since EXP is a 3-bit field.
+ */
+static inline u8 ip_dscp_to_mpls_exp (u8 tos)
+{
+ return (tos >> 5);
+}
+
+/**
+ * When in uniform mode convert an MPLS EXP value to an IPv[46] DSCP value.
+ * Places the 3 EXP bits in the precedence position of the TOS/traffic-class
+ * byte; the lower 5 bits of the result are always zero.
+ */
+static inline u8 mpls_exp_to_ip_dscp (u8 exp)
+{
+ return (exp << 5);
+}
+
#define FOR_EACH_MPLS_EOS_BIT(_eos) \
for (_eos = MPLS_NON_EOS; _eos <= MPLS_EOS; _eos++)
diff --git a/src/vnet/srmpls/sr_mpls_policy.c b/src/vnet/srmpls/sr_mpls_policy.c
index 1a5ba6c6c30..4a563248ce0 100755
--- a/src/vnet/srmpls/sr_mpls_policy.c
+++ b/src/vnet/srmpls/sr_mpls_policy.c
@@ -34,9 +34,6 @@
#include <vnet/srmpls/sr_mpls.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/dpo.h>
-#include <vnet/dpo/replicate_dpo.h>
-#include <vnet/dpo/mpls_label_dpo.h>
-#include <vnet/dpo/lookup_dpo.h>
#include <vnet/ip/ip.h>
#include <vppinfra/error.h>
@@ -763,7 +760,11 @@ sr_mpls_policy_assign_endpoint_color (mpls_label_t bsid,
FIB_SOURCE_SR,
FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, paths);
- vec_add1 (path.frp_label_stack, MPLS_IETF_IMPLICIT_NULL_LABEL);
+ fib_mpls_label_t fml = {
+ .fml_value = MPLS_IETF_IMPLICIT_NULL_LABEL,
+ };
+
+ vec_add1 (path.frp_label_stack, fml);
pfx.fp_eos = MPLS_NON_EOS;
path.frp_eos = MPLS_NON_EOS;
diff --git a/src/vnet/srmpls/sr_mpls_steering.c b/src/vnet/srmpls/sr_mpls_steering.c
index 0bd34665c3d..8bb072c8812 100755
--- a/src/vnet/srmpls/sr_mpls_steering.c
+++ b/src/vnet/srmpls/sr_mpls_steering.c
@@ -301,7 +301,10 @@ compute_sr_te_automated_steering_fib_entry (mpls_sr_steering_policy_t *
if (steer_pl->vpn_label != (u32) ~ 0)
{
- vec_add1 (path.frp_label_stack, steer_pl->vpn_label);
+ fib_mpls_label_t fml = {
+ .fml_value = steer_pl->vpn_label,
+ };
+ vec_add1 (path.frp_label_stack, fml);
path.frp_eos = MPLS_NON_EOS;
}
@@ -480,7 +483,12 @@ sr_mpls_steering_policy_add (mpls_label_t bsid, u32 table_id,
fib_route_path_t *paths = NULL;
if (steer_pl->vpn_label != (u32) ~ 0)
- vec_add1 (path.frp_label_stack, steer_pl->vpn_label);
+ {
+ fib_mpls_label_t fml = {
+ .fml_value = steer_pl->vpn_label,
+ };
+ vec_add1 (path.frp_label_stack, fml);
+ }
/* FIB API calls - Recursive route through the BindingSID */
if (traffic_type == SR_STEER_IPV6)
diff --git a/src/vnet/vxlan/vxlan.api b/src/vnet/vxlan/vxlan.api
index 3a07d92f79b..dae96af6e10 100644
--- a/src/vnet/vxlan/vxlan.api
+++ b/src/vnet/vxlan/vxlan.api
@@ -28,7 +28,6 @@ option version = "1.1.0";
@param decap_next_index - Name of decap next graph node
@param vni - The VXLAN Network Identifier, uint24
*/
-
define vxlan_add_del_tunnel
{
u32 client_index;
diff --git a/test/test_bier.py b/test/test_bier.py
index ae7b46a130c..c7ec0eed127 100644
--- a/test/test_bier.py
+++ b/test/test_bier.py
@@ -6,7 +6,8 @@ import socket
from framework import VppTestCase, VppTestRunner, running_extended_tests
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsTable, VppIpMRoute, VppMRoutePath, VppIpTable, \
- MRouteEntryFlags, MRouteItfFlags, MPLS_LABEL_INVALID, DpoProto
+ MRouteEntryFlags, MRouteItfFlags, MPLS_LABEL_INVALID, DpoProto, \
+ VppMplsLabel
from vpp_bier import *
from vpp_udp_encap import *
@@ -99,15 +100,17 @@ class TestBier(VppTestCase):
bier_routes = []
for i in range(1, max_bp+1):
nh = "10.0.%d.%d" % (i / 255, i % 255)
- nh_routes.append(VppIpRoute(self, nh, 32,
- [VppRoutePath(self.pg1.remote_ip4,
- self.pg1.sw_if_index,
- labels=[2000+i])]))
+ nh_routes.append(
+ VppIpRoute(self, nh, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2000+i)])]))
nh_routes[-1].add_vpp_config()
- bier_routes.append(VppBierRoute(self, bti, i,
- [VppRoutePath(nh, 0xffffffff,
- labels=[100+i])]))
+ bier_routes.append(
+ VppBierRoute(self, bti, i,
+ [VppRoutePath(nh, 0xffffffff,
+ labels=[VppMplsLabel(100+i)])]))
bier_routes[-1].add_vpp_config()
#
@@ -216,20 +219,20 @@ class TestBier(VppTestCase):
ip_route_1 = VppIpRoute(self, nh1, 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
- labels=[2001])])
+ labels=[VppMplsLabel(2001)])])
ip_route_2 = VppIpRoute(self, nh2, 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
- labels=[2002])])
+ labels=[VppMplsLabel(2002)])])
ip_route_1.add_vpp_config()
ip_route_2.add_vpp_config()
bier_route_1 = VppBierRoute(self, bti, 1,
[VppRoutePath(nh1, 0xffffffff,
- labels=[101])])
+ labels=[VppMplsLabel(101)])])
bier_route_2 = VppBierRoute(self, bti, 2,
[VppRoutePath(nh2, 0xffffffff,
- labels=[102])])
+ labels=[VppMplsLabel(102)])])
bier_route_1.add_vpp_config()
bier_route_2.add_vpp_config()
@@ -561,7 +564,7 @@ class TestBier(VppTestCase):
ip_route = VppIpRoute(self, nh1, 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
- labels=[2001])])
+ labels=[VppMplsLabel(2001)])])
ip_route.add_vpp_config()
udp_encap = VppUdpEncap(self, 4,
diff --git a/test/test_mpls.py b/test/test_mpls.py
index aa5e67439b7..33fed680dbe 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -6,7 +6,8 @@ import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
- MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable
+ MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable, \
+ VppMplsLabel, MplsLspMode
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
from scapy.packet import Raw
@@ -25,7 +26,7 @@ def verify_filter(capture, sent):
return capture
-def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0):
+def verify_mpls_stack(tst, rx, mpls_labels):
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
tst.assertEqual(eth.type, 0x8847)
@@ -33,12 +34,10 @@ def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0):
rx_mpls = rx[MPLS]
for ii in range(len(mpls_labels)):
- tst.assertEqual(rx_mpls.label, mpls_labels[ii])
- tst.assertEqual(rx_mpls.cos, 0)
- if ii == num:
- tst.assertEqual(rx_mpls.ttl, ttl)
- else:
- tst.assertEqual(rx_mpls.ttl, 255)
+ tst.assertEqual(rx_mpls.label, mpls_labels[ii].value)
+ tst.assertEqual(rx_mpls.cos, mpls_labels[ii].exp)
+ tst.assertEqual(rx_mpls.ttl, mpls_labels[ii].ttl)
+
if ii == len(mpls_labels) - 1:
tst.assertEqual(rx_mpls.s, 1)
else:
@@ -102,11 +101,11 @@ class TestMPLS(VppTestCase):
self,
src_if,
mpls_labels,
- mpls_ttl=255,
ping=0,
ip_itf=None,
dst_ip=None,
chksum=None,
+ ip_ttl=64,
n=257):
self.reset_packet_infos()
pkts = []
@@ -116,22 +115,24 @@ class TestMPLS(VppTestCase):
p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
for ii in range(len(mpls_labels)):
- if ii == len(mpls_labels) - 1:
- p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
- else:
- p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
+ p = p / MPLS(label=mpls_labels[ii].value,
+ ttl=mpls_labels[ii].ttl,
+ cos=mpls_labels[ii].exp)
if not ping:
if not dst_ip:
- p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) /
+ p = (p / IP(src=src_if.local_ip4,
+ dst=src_if.remote_ip4,
+ ttl=ip_ttl) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
- p = (p / IP(src=src_if.local_ip4, dst=dst_ip) /
+ p = (p / IP(src=src_if.local_ip4, dst=dst_ip, ttl=ip_ttl) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
p = (p / IP(src=ip_itf.remote_ip4,
- dst=ip_itf.local_ip4) /
+ dst=ip_itf.local_ip4,
+ ttl=ip_ttl) /
ICMP())
if chksum:
@@ -140,39 +141,58 @@ class TestMPLS(VppTestCase):
pkts.append(p)
return pkts
- def create_stream_ip4(self, src_if, dst_ip):
+ def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
- IP(src=src_if.remote_ip4, dst=dst_ip) /
+ IP(src=src_if.remote_ip4, dst=dst_ip,
+ ttl=ip_ttl, tos=ip_dscp) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
- def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl,
- dst_ip=None, hlim=64):
- if dst_ip is None:
- dst_ip = src_if.remote_ip6
+ def create_stream_ip6(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
- MPLS(label=mpls_label, ttl=mpls_ttl) /
- IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
+ IPv6(src=src_if.remote_ip6, dst=dst_ip,
+ hlim=ip_ttl, tc=ip_dscp) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
- def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
+ def create_stream_labelled_ip6(self, src_if, mpls_labels,
+ hlim=64, dst_ip=None):
+ if dst_ip is None:
+ dst_ip = src_if.remote_ip6
+ self.reset_packet_infos()
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
+ for l in mpls_labels:
+ p = p / MPLS(label=l.value, ttl=l.ttl, cos=l.exp)
+
+ p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0,
+ ip_ttl=None, ip_dscp=0):
try:
capture = verify_filter(capture, sent)
@@ -192,8 +212,12 @@ class TestMPLS(VppTestCase):
if not ping_resp:
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
- # IP processing post pop has decremented the TTL
- self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+ self.assertEqual(rx_ip.tos, ip_dscp)
+ if not ip_ttl:
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+ else:
+ self.assertEqual(rx_ip.ttl, ip_ttl)
else:
self.assertEqual(rx_ip.src, tx_ip.dst)
self.assertEqual(rx_ip.dst, tx_ip.src)
@@ -202,7 +226,7 @@ class TestMPLS(VppTestCase):
raise
def verify_capture_labelled_ip4(self, src_if, capture, sent,
- mpls_labels):
+ mpls_labels, ip_ttl=None):
try:
capture = verify_filter(capture, sent)
@@ -214,22 +238,46 @@ class TestMPLS(VppTestCase):
tx_ip = tx[IP]
rx_ip = rx[IP]
- # the MPLS TTL is copied from the IP
- verify_mpls_stack(self, rx, mpls_labels, rx_ip.ttl,
- len(mpls_labels) - 1)
+ verify_mpls_stack(self, rx, mpls_labels)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
- # IP processing post pop has decremented the TTL
- self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+ if not ip_ttl:
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+ else:
+ self.assertEqual(rx_ip.ttl, ip_ttl)
+
+ except:
+ raise
+
+ def verify_capture_labelled_ip6(self, src_if, capture, sent,
+ mpls_labels, ip_ttl=None):
+ try:
+ capture = verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+
+ verify_mpls_stack(self, rx, mpls_labels)
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ if not ip_ttl:
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+ else:
+ self.assertEqual(rx_ip.hlim, ip_ttl)
except:
raise
- def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels,
- ttl=255, top=None):
- if top is None:
- top = len(mpls_labels) - 1
+ def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
try:
capture = verify_filter(capture, sent)
@@ -241,8 +289,7 @@ class TestMPLS(VppTestCase):
tx_ip = tx[IP]
rx_ip = rx[IP]
- # the MPLS TTL is 255 since it enters a new tunnel
- verify_mpls_stack(self, rx, mpls_labels, ttl, top)
+ verify_mpls_stack(self, rx, mpls_labels)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
@@ -253,7 +300,7 @@ class TestMPLS(VppTestCase):
raise
def verify_capture_labelled(self, src_if, capture, sent,
- mpls_labels, ttl=254, num=0):
+ mpls_labels):
try:
capture = verify_filter(capture, sent)
@@ -261,11 +308,12 @@ class TestMPLS(VppTestCase):
for i in range(len(capture)):
rx = capture[i]
- verify_mpls_stack(self, rx, mpls_labels, ttl, num)
+ verify_mpls_stack(self, rx, mpls_labels)
except:
raise
- def verify_capture_ip6(self, src_if, capture, sent):
+ def verify_capture_ip6(self, src_if, capture, sent,
+ ip_hlim=None, ip_dscp=0):
try:
self.assertEqual(len(capture), len(sent))
@@ -282,8 +330,12 @@ class TestMPLS(VppTestCase):
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
+ self.assertEqual(rx_ip.tc, ip_dscp)
# IP processing post pop has decremented the TTL
- self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+ if not ip_hlim:
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+ else:
+ self.assertEqual(rx_ip.hlim, ip_hlim)
except:
raise
@@ -323,22 +375,18 @@ class TestMPLS(VppTestCase):
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
- labels=[33])])
+ labels=[VppMplsLabel(33)])])
route_32_eos.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [32])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled(self.pg0, rx, tx, [33])
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(32, ttl=32, exp=1)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(33, ttl=31, exp=1)])
#
# A simple MPLS xconnect - non-eos label in label out
@@ -346,22 +394,38 @@ class TestMPLS(VppTestCase):
route_32_neos = VppMplsRoute(self, 32, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
- labels=[33])])
+ labels=[VppMplsLabel(33)])])
route_32_neos.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
- self.pg0.add_stream(tx)
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(32, ttl=21, exp=7),
+ VppMplsLabel(99)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(33, ttl=20, exp=7),
+ VppMplsLabel(99)])
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+ #
+ # A simple MPLS xconnect - non-eos label in label out, uniform mode
+ #
+ route_42_neos = VppMplsRoute(
+ self, 42, 0,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(43, MplsLspMode.UNIFORM)])])
+ route_42_neos.add_vpp_config()
- rx = self.pg0.get_capture()
- self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(42, ttl=21, exp=7),
+ VppMplsLabel(99)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(43, ttl=20, exp=7),
+ VppMplsLabel(99)])
#
# An MPLS xconnect - EOS label in IP out
@@ -372,26 +436,41 @@ class TestMPLS(VppTestCase):
labels=[])])
route_33_eos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [33])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx)
#
# disposed packets have an invalid IPv4 checkusm
#
- tx = self.create_stream_labelled_ip4(self.pg0, [33],
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
dst_ip=self.pg0.remote_ip4,
n=65,
chksum=1)
self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
#
+ # An MPLS xconnect - EOS label in IP out, uniform mode
+ #
+ route_3333_eos = VppMplsRoute(
+ self, 3333, 1,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])])
+ route_3333_eos.add_vpp_config()
+
+ tx = self.create_stream_labelled_ip4(
+ self.pg0,
+ [VppMplsLabel(3333, ttl=55, exp=3)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=54, ip_dscp=0x60)
+ tx = self.create_stream_labelled_ip4(
+ self.pg0,
+ [VppMplsLabel(3333, ttl=66, exp=4)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=65, ip_dscp=0x80)
+
+ #
# An MPLS xconnect - EOS label in IPv6 out
#
route_333_eos = VppMplsRoute(
@@ -402,33 +481,18 @@ class TestMPLS(VppTestCase):
proto=DpoProto.DPO_PROTO_IP6)])
route_333_eos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg0, [333], 64)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx)
#
# disposed packets have an TTL expired
#
- tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(333, ttl=64)],
dst_ip=self.pg1.remote_ip6,
hlim=1)
-
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
- dst_ip=self.pg1.remote_ip6,
- hlim=0)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
@@ -438,33 +502,39 @@ class TestMPLS(VppTestCase):
self, 334, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[3],
+ labels=[VppMplsLabel(3)],
proto=DpoProto.DPO_PROTO_IP6)])
route_334_eos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg0, [334], 64)
- self.pg0.add_stream(tx)
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(334, ttl=64)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_ip6(self.pg0, rx, tx)
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+ #
+ # An MPLS xconnect - EOS label in IPv6 out w imp-null in uniform mode
+ #
+ route_335_eos = VppMplsRoute(
+ self, 335, 1,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route_335_eos.add_vpp_config()
- rx = self.pg0.get_capture()
- self.verify_capture_ip6(self.pg0, rx, tx)
+ tx = self.create_stream_labelled_ip6(
+ self.pg0,
+ [VppMplsLabel(335, ttl=27, exp=4)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_ip6(self.pg0, rx, tx, ip_hlim=26, ip_dscp=0x80)
#
# disposed packets have an TTL expired
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg0, [334], 64,
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334)],
dst_ip=self.pg1.remote_ip6,
hlim=0)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
@@ -477,33 +547,51 @@ class TestMPLS(VppTestCase):
labels=[])])
route_33_neos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
- self.pg0.assert_nothing_captured(
- remark="MPLS non-EOS packets popped and forwarded")
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(33),
+ VppMplsLabel(99)])
+ self.send_and_assert_no_replies(
+ self.pg0, tx,
+ "MPLS non-EOS packets popped and forwarded")
#
# A recursive EOS x-connect, which resolves through another x-connect
+ # in pipe mode
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
- labels=[44, 45])])
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(45)])])
route_34_eos.add_vpp_config()
- tx = self.create_stream_labelled_ip4(self.pg0, [34])
- self.pg0.add_stream(tx)
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(34, ttl=3)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(33),
+ VppMplsLabel(44),
+ VppMplsLabel(45, ttl=2)])
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+ #
+ # A recursive EOS x-connect, which resolves through another x-connect
+ # in uniform mode
+ #
+ route_35_eos = VppMplsRoute(
+ self, 35, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=42,
+ labels=[VppMplsLabel(44)])])
+ route_35_eos.add_vpp_config()
- rx = self.pg0.get_capture()
- self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 45], num=2)
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(35, ttl=3)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(43, ttl=2),
+ VppMplsLabel(44, ttl=2)])
#
# A recursive non-EOS x-connect, which resolves through another
@@ -513,19 +601,20 @@ class TestMPLS(VppTestCase):
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
- labels=[44, 46])])
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(46)])])
route_34_neos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(34, ttl=45),
+ VppMplsLabel(99)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
# it's the 2nd (counting from 0) label in the stack that is swapped
- self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(33),
+ VppMplsLabel(44),
+ VppMplsLabel(46, ttl=44),
+ VppMplsLabel(99)])
#
# an recursive IP route that resolves through the recursive non-eos
@@ -535,18 +624,16 @@ class TestMPLS(VppTestCase):
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=34,
- labels=[55])])
+ labels=[VppMplsLabel(55)])])
ip_10_0_0_1.add_vpp_config()
- self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(33),
+ VppMplsLabel(44),
+ VppMplsLabel(46),
+ VppMplsLabel(55)])
ip_10_0_0_1.remove_vpp_config()
route_34_neos.remove_vpp_config()
@@ -565,7 +652,7 @@ class TestMPLS(VppTestCase):
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
- labels=[45])])
+ labels=[VppMplsLabel(45)])])
route_10_0_0_1.add_vpp_config()
# bind a local label to the route
@@ -573,37 +660,24 @@ class TestMPLS(VppTestCase):
binding.add_vpp_config()
# non-EOS stream
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(44),
+ VppMplsLabel(99)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(45, ttl=63),
+ VppMplsLabel(99)])
# EOS stream
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [44])
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled(self.pg0, rx, tx, [45])
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(44)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled(self.pg0, rx, tx,
+ [VppMplsLabel(45, ttl=63)])
# IP stream
- self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(45)])
#
# cleanup
@@ -620,22 +694,16 @@ class TestMPLS(VppTestCase):
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
- labels=[32])])
+ labels=[VppMplsLabel(32)])])
route_10_0_0_1.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(32)])
#
# Add a non-recursive route with a 3 out labels
@@ -643,22 +711,56 @@ class TestMPLS(VppTestCase):
route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
- labels=[32, 33, 34])])
+ labels=[VppMplsLabel(32),
+ VppMplsLabel(33),
+ VppMplsLabel(34)])])
route_10_0_0_2.add_vpp_config()
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.2",
+ ip_ttl=44, ip_dscp=0xff)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32),
+ VppMplsLabel(33),
+ VppMplsLabel(34)],
+ ip_ttl=43)
+
#
- # a stream that matches the route for 10.0.0.1
- # PG0 is in the default table
+ # Add a non-recursive route with a single out label in uniform mode
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
- self.pg0.add_stream(tx)
+ route_10_0_0_3 = VppIpRoute(
+ self, "10.0.0.3", 32,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(32,
+ mode=MplsLspMode.UNIFORM)])])
+ route_10_0_0_3.add_vpp_config()
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
+ ip_ttl=54, ip_dscp=0xbe)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32, ttl=53, exp=5)])
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])
+ #
+ # Add an IPv6 non-recursive route with a single out label in
+ # uniform mode
+ #
+ route_2001_3 = VppIpRoute(
+ self, "2001::3", 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6,
+ labels=[VppMplsLabel(32,
+ mode=MplsLspMode.UNIFORM)])],
+ is_ip6=1)
+ route_2001_3.add_vpp_config()
+
+ tx = self.create_stream_ip6(self.pg0, "2001::3",
+ ip_ttl=54, ip_dscp=0xbe)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip6(self.pg0, rx, tx,
+ [VppMplsLabel(32, ttl=53, exp=5)])
#
# add a recursive path, with output label, via the 1 label route
@@ -666,22 +768,18 @@ class TestMPLS(VppTestCase):
route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
[VppRoutePath("10.0.0.1",
0xffffffff,
- labels=[44])])
+ labels=[VppMplsLabel(44)])])
route_11_0_0_1.add_vpp_config()
#
# a stream that matches the route for 11.0.0.1, should pick up
# the label stack for 11.0.0.1 and 10.0.0.1
#
- self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32),
+ VppMplsLabel(44)])
#
# add a recursive path, with 2 labels, via the 3 label route
@@ -689,23 +787,22 @@ class TestMPLS(VppTestCase):
route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
[VppRoutePath("10.0.0.2",
0xffffffff,
- labels=[44, 45])])
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(45)])])
route_11_0_0_2.add_vpp_config()
#
# a stream that matches the route for 11.0.0.1, should pick up
# the label stack for 11.0.0.1 and 10.0.0.1
#
- self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
- self.verify_capture_labelled_ip4(
- self.pg0, rx, tx, [32, 33, 34, 44, 45])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32),
+ VppMplsLabel(33),
+ VppMplsLabel(34),
+ VppMplsLabel(44),
+ VppMplsLabel(45)])
#
# cleanup
@@ -715,16 +812,18 @@ class TestMPLS(VppTestCase):
route_10_0_0_2.remove_vpp_config()
route_10_0_0_1.remove_vpp_config()
- def test_tunnel(self):
- """ MPLS Tunnel Tests """
+ def test_tunnel_pipe(self):
+ """ MPLS Tunnel Tests - Pipe """
#
# Create a tunnel with a single out label
#
- mpls_tun = VppMPLSTunnelInterface(self,
- [VppRoutePath(self.pg0.remote_ip4,
- self.pg0.sw_if_index,
- labels=[44, 46])])
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(46)])])
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
@@ -744,7 +843,9 @@ class TestMPLS(VppTestCase):
self.pg_start()
rx = self.pg0.get_capture()
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(44),
+ VppMplsLabel(46)])
#
# add a labelled route through the new tunnel
@@ -763,35 +864,88 @@ class TestMPLS(VppTestCase):
self.pg_start()
rx = self.pg0.get_capture()
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46, 33],
- ttl=63, top=2)
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(44),
+ VppMplsLabel(46),
+ VppMplsLabel(33, ttl=255)])
- def test_v4_exp_null(self):
- """ MPLS V4 Explicit NULL test """
+ def test_tunnel_uniform(self):
+ """ MPLS Tunnel Tests - Uniform """
#
- # The first test case has an MPLS TTL of 0
- # all packet should be dropped
+ # Create a tunnel with a single out label
+ # The label stack is specified here from outer to inner
#
- tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(44, ttl=32),
+ VppMplsLabel(46, MplsLspMode.UNIFORM)])])
+ mpls_tun.add_vpp_config()
+ mpls_tun.admin_up()
+
+ #
+ # add an unlabelled route through the new tunnel
+ #
+ route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun._sw_if_index)])
+ route_10_0_0_3.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.3", ip_ttl=24)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded")
+ rx = self.pg0.get_capture()
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(44, ttl=32),
+ VppMplsLabel(46, ttl=23)])
#
- # a stream with a non-zero MPLS TTL
- # PG0 is in the default table
+ # add a labelled route through the new tunnel
#
- tx = self.create_stream_labelled_ip4(self.pg0, [0])
+ route_10_0_0_4 = VppIpRoute(
+ self, "10.0.0.4", 32,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun._sw_if_index,
+ labels=[VppMplsLabel(33, ttl=47)])])
+ route_10_0_0_4.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(44, ttl=32),
+ VppMplsLabel(46, ttl=47),
+ VppMplsLabel(33, ttl=47)])
+
+ def test_v4_exp_null(self):
+ """ MPLS V4 Explicit NULL test """
+
+ #
+ # The first test case has an MPLS TTL of 0
+ # all packet should be dropped
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(0, ttl=0)])
+ self.send_and_assert_no_replies(self.pg0, tx,
+ "MPLS TTL=0 packets forwarded")
+
+ #
+ # a stream with a non-zero MPLS TTL
+ # PG0 is in the default table
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(0)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx)
#
@@ -799,15 +953,9 @@ class TestMPLS(VppTestCase):
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg1, [0])
- self.pg1.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture()
- self.verify_capture_ip4(self.pg0, rx, tx)
+ tx = self.create_stream_labelled_ip4(self.pg1, [VppMplsLabel(0)])
+ rx = self.send_and_expect(self.pg1, tx, self.pg1)
+ self.verify_capture_ip4(self.pg1, rx, tx)
def test_v6_exp_null(self):
""" MPLS V6 Explicit NULL test """
@@ -816,14 +964,8 @@ class TestMPLS(VppTestCase):
# a stream with a non-zero MPLS TTL
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(2)])
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx)
#
@@ -831,14 +973,8 @@ class TestMPLS(VppTestCase):
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
- self.pg1.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture()
+ tx = self.create_stream_labelled_ip6(self.pg1, [VppMplsLabel(2)])
+ rx = self.send_and_expect(self.pg1, tx, self.pg1)
self.verify_capture_ip6(self.pg0, rx, tx)
def test_deag(self):
@@ -857,15 +993,11 @@ class TestMPLS(VppTestCase):
# ping an interface in the default table
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1,
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(34)],
+ ping=1,
ip_itf=self.pg0)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture()
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)
#
@@ -882,16 +1014,9 @@ class TestMPLS(VppTestCase):
# PG0 is in the default table. packet arrive labelled in the
# default table and egress unlabelled in the non-default
#
- self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(
- self.pg0, [35], ping=1, ip_itf=self.pg1)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- packet_count = self.get_packet_count_for_if_idx(self.pg0.sw_if_index)
- rx = self.pg1.get_capture(packet_count)
+ self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1)
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
#
@@ -902,15 +1027,11 @@ class TestMPLS(VppTestCase):
0xffffffff)])
route_36_neos.add_vpp_config()
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [36, 35],
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(36),
+ VppMplsLabel(35)],
ping=1, ip_itf=self.pg1)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture(len(tx))
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
route_36_neos.remove_vpp_config()
@@ -948,15 +1069,10 @@ class TestMPLS(VppTestCase):
# ping an interface in the default table
# PG0 is in the default table
#
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [34], n=257,
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(34)],
dst_ip="10.0.0.1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture(257)
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx)
def test_mcast_mid_point(self):
@@ -976,17 +1092,18 @@ class TestMPLS(VppTestCase):
# Add a mcast entry that replicate to pg2 and pg3
# and replicate to a interface-rx (like a bud node would)
#
- route_3400_eos = VppMplsRoute(self, 3400, 1,
- [VppRoutePath(self.pg2.remote_ip4,
- self.pg2.sw_if_index,
- labels=[3401]),
- VppRoutePath(self.pg3.remote_ip4,
- self.pg3.sw_if_index,
- labels=[3402]),
- VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- is_interface_rx=1)],
- is_multicast=1)
+ route_3400_eos = VppMplsRoute(
+ self, 3400, 1,
+ [VppRoutePath(self.pg2.remote_ip4,
+ self.pg2.sw_if_index,
+ labels=[VppMplsLabel(3401)]),
+ VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index,
+ labels=[VppMplsLabel(3402)]),
+ VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ is_interface_rx=1)],
+ is_multicast=1)
route_3400_eos.add_vpp_config()
#
@@ -994,7 +1111,9 @@ class TestMPLS(VppTestCase):
# PG0 is in the default table
#
self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [3400], n=257,
+ tx = self.create_stream_labelled_ip4(self.pg0,
+ [VppMplsLabel(3400, ttl=64)],
+ n=257,
dst_ip="10.0.0.1")
self.pg0.add_stream(tx)
@@ -1005,9 +1124,11 @@ class TestMPLS(VppTestCase):
self.verify_capture_ip4(self.pg1, rx, tx)
rx = self.pg2.get_capture(257)
- self.verify_capture_labelled(self.pg2, rx, tx, [3401])
+ self.verify_capture_labelled(self.pg2, rx, tx,
+ [VppMplsLabel(3401, ttl=63)])
rx = self.pg3.get_capture(257)
- self.verify_capture_labelled(self.pg3, rx, tx, [3402])
+ self.verify_capture_labelled(self.pg3, rx, tx,
+ [VppMplsLabel(3402, ttl=63)])
def test_mcast_head(self):
""" MPLS Multicast Head-end """
@@ -1015,14 +1136,15 @@ class TestMPLS(VppTestCase):
#
# Create a multicast tunnel with two replications
#
- mpls_tun = VppMPLSTunnelInterface(self,
- [VppRoutePath(self.pg2.remote_ip4,
- self.pg2.sw_if_index,
- labels=[42]),
- VppRoutePath(self.pg3.remote_ip4,
- self.pg3.sw_if_index,
- labels=[43])],
- is_multicast=1)
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg2.remote_ip4,
+ self.pg2.sw_if_index,
+ labels=[VppMplsLabel(42)]),
+ VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index,
+ labels=[VppMplsLabel(43)])],
+ is_multicast=1)
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
@@ -1042,9 +1164,9 @@ class TestMPLS(VppTestCase):
self.pg_start()
rx = self.pg2.get_capture(257)
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
rx = self.pg3.get_capture(257)
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
#
# An an IP multicast route via the tunnel
@@ -1070,9 +1192,9 @@ class TestMPLS(VppTestCase):
self.pg_start()
rx = self.pg2.get_capture(257)
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
rx = self.pg3.get_capture(257)
- self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
def test_mcast_ip4_tail(self):
""" MPLS IPv4 Multicast Tail """
@@ -1112,7 +1234,7 @@ class TestMPLS(VppTestCase):
# Drop due to interface lookup miss
#
self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [34],
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1", n=1)
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
@@ -1121,30 +1243,24 @@ class TestMPLS(VppTestCase):
#
route_232_1_1_1.update_rpf_id(55)
- self.vapi.cli("clear trace")
- tx = self.create_stream_labelled_ip4(self.pg0, [34],
- dst_ip="232.1.1.1", n=257)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture(257)
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
+ dst_ip="232.1.1.1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx)
#
# disposed packets have an invalid IPv4 checkusm
#
- tx = self.create_stream_labelled_ip4(self.pg0, [34],
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1", n=65,
chksum=1)
self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
#
- # set the RPF-ID of the enrtry to not match the input packet's
+ # set the RPF-ID of the entry to not match the input packet's
#
route_232_1_1_1.update_rpf_id(56)
- tx = self.create_stream_labelled_ip4(self.pg0, [34],
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
@@ -1188,42 +1304,36 @@ class TestMPLS(VppTestCase):
#
# Drop due to interface lookup miss
#
- tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
dst_ip="ff01::1")
+ self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")
#
# set the RPF-ID of the enrtry to match the input packet's
#
route_ff.update_rpf_id(55)
- tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
dst_ip="ff01::1")
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg1.get_capture(257)
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip6(self.pg1, rx, tx)
#
# disposed packets have hop-limit = 1
#
- tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
- dst_ip="ff01::1", hlim=1)
- self.pg0.add_stream(tx)
-
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx = self.pg0.get_capture(257)
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(34)],
+ dst_ip="ff01::1",
+ hlim=1)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
# set the RPF-ID of the enrtry to not match the input packet's
#
route_ff.update_rpf_id(56)
- tx = self.create_stream_labelled_ip6(self.pg0, [34], 225,
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(34)],
dst_ip="ff01::1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
@@ -1709,11 +1819,7 @@ class TestMPLSL2(VppTestCase):
self.pg0.admin_down()
super(TestMPLSL2, self).tearDown()
- def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels,
- ttl=255, top=None):
- if top is None:
- top = len(mpls_labels) - 1
-
+ def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
@@ -1723,7 +1829,7 @@ class TestMPLSL2(VppTestCase):
rx = capture[i]
# the MPLS TTL is 255 since it enters a new tunnel
- verify_mpls_stack(self, rx, mpls_labels, ttl, top)
+ verify_mpls_stack(self, rx, mpls_labels)
tx_eth = tx[Ether]
rx_eth = Ether(str(rx[MPLS].payload))
@@ -1736,12 +1842,15 @@ class TestMPLSL2(VppTestCase):
#
# Create an MPLS tunnel that pushes 1 label
+ # For Ethernet over MPLS the uniform mode is irrelevant since ttl/cos
+ # information is not in the packet, but we test it works anyway
#
- mpls_tun_1 = VppMPLSTunnelInterface(self,
- [VppRoutePath(self.pg0.remote_ip4,
- self.pg0.sw_if_index,
- labels=[42])],
- is_l2=1)
+ mpls_tun_1 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
+ is_l2=1)
mpls_tun_1.add_vpp_config()
mpls_tun_1.admin_up()
@@ -1778,37 +1887,32 @@ class TestMPLSL2(VppTestCase):
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- self.pg0.add_stream(pcore * 65)
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+ tx0 = pcore * 65
+ rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
+ payload = pcore[MPLS].payload
- rx0 = self.pg1.get_capture(65)
- tx = pcore[MPLS].payload
-
- self.assertEqual(rx0[0][Ether].dst, tx[Ether].dst)
- self.assertEqual(rx0[0][Ether].src, tx[Ether].src)
+ self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
+ self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
#
# Inject a packet from the custoer/L2 side
#
- self.pg1.add_stream(tx * 65)
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
-
- rx0 = self.pg0.get_capture(65)
+ tx1 = pcore[MPLS].payload * 65
+ rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
- self.verify_capture_tunneled_ethernet(rx0, tx*65, [42])
+ self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
def test_vpls(self):
""" Virtual Private LAN Service """
#
# Create an L2 MPLS tunnel
#
- mpls_tun = VppMPLSTunnelInterface(self,
- [VppRoutePath(self.pg0.remote_ip4,
- self.pg0.sw_if_index,
- labels=[42])],
- is_l2=1)
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42)])],
+ is_l2=1)
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
@@ -1875,7 +1979,8 @@ class TestMPLSL2(VppTestCase):
rx0 = self.pg0.get_capture(65)
- self.verify_capture_tunneled_ethernet(rx0, p_cust*65, [42])
+ self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
+ [VppMplsLabel(42)])
#
# remove interfaces from customers bridge-domain
diff --git a/test/test_udp.py b/test/test_udp.py
index 68b023c5e2f..322d8133b0d 100644
--- a/test/test_udp.py
+++ b/test/test_udp.py
@@ -2,7 +2,7 @@
from framework import VppTestCase, VppTestRunner
from vpp_udp_encap import *
-from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, VppMplsLabel
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
@@ -67,12 +67,12 @@ class TestUdpEncap(VppTestCase):
self.assertEqual(rx[UDP].dport, encap_obj.dst_port)
def validate_inner4(self, rx, tx, ttl=None):
- self.assertEqual(rx.src, tx[IP].src)
- self.assertEqual(rx.dst, tx[IP].dst)
+ self.assertEqual(rx[IP].src, tx[IP].src)
+ self.assertEqual(rx[IP].dst, tx[IP].dst)
if ttl:
- self.assertEqual(rx.ttl, ttl)
+ self.assertEqual(rx[IP].ttl, ttl)
else:
- self.assertEqual(rx.ttl, tx[IP].ttl)
+ self.assertEqual(rx[IP].ttl, tx[IP].ttl)
def validate_inner6(self, rx, tx):
self.assertEqual(rx.src, tx[IPv6].src)
@@ -208,7 +208,7 @@ class TestUdpEncap(VppTestCase):
0xFFFFFFFF,
is_udp_encap=1,
next_hop_id=1,
- labels=[66])])
+ labels=[VppMplsLabel(66)])])
route_4oMPLSo4.add_vpp_config()
p_4omo4 = (Ether(src=self.pg0.remote_mac,
diff --git a/test/vpp_bier.py b/test/vpp_bier.py
index ef9a9ab75ec..7566b1f95ab 100644
--- a/test/vpp_bier.py
+++ b/test/vpp_bier.py
@@ -4,7 +4,7 @@
import socket
from vpp_object import VppObject
-from vpp_ip_route import MPLS_LABEL_INVALID, VppRoutePath
+from vpp_ip_route import MPLS_LABEL_INVALID, VppRoutePath, VppMplsLabel
class BIER_HDR_PAYLOAD:
@@ -120,11 +120,34 @@ class VppBierRoute(VppObject):
self.bp = bp
self.paths = paths
+ def encode_paths(self):
+ br_paths = []
+ for p in self.paths:
+ lstack = []
+ for l in p.nh_labels:
+ if type(l) == VppMplsLabel:
+ lstack.append(l.encode())
+ else:
+ lstack.append({'label': l, 'ttl': 255})
+ n_labels = len(lstack)
+ while (len(lstack) < 16):
+ lstack.append({})
+ br_paths.append({'next_hop': p.nh_addr,
+ 'weight': 1,
+ 'afi': 0,
+ 'preference': 0,
+ 'table_id': p.nh_table_id,
+ 'next_hop_id': p.next_hop_id,
+ 'is_udp_encap': p.is_udp_encap,
+ 'n_labels': n_labels,
+ 'label_stack': lstack})
+ return br_paths
+
def add_vpp_config(self):
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- self.paths,
+ self.encode_paths(),
is_add=1)
self._test.registry.register(self, self._test.logger)
@@ -132,7 +155,7 @@ class VppBierRoute(VppObject):
self._test.vapi.bier_route_add_del(
self.tbl_id,
self.bp,
- self.paths,
+ self.encode_paths(),
is_add=0)
def __str__(self):
diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py
index 2d34f55efdf..ca0ae1ad47d 100644
--- a/test/vpp_ip_route.py
+++ b/test/vpp_ip_route.py
@@ -38,6 +38,11 @@ class DpoProto:
DPO_PROTO_NSH = 5
+class MplsLspMode:
+ PIPE = 0
+ UNIFORM = 1
+
+
def find_route(test, ip_addr, len, table_id=0, inet=AF_INET):
if inet == AF_INET:
s = 4
@@ -95,6 +100,21 @@ class VppIpTable(VppObject):
self.table_id))
+class VppMplsLabel(object):
+ def __init__(self, value, mode=MplsLspMode.PIPE, ttl=64, exp=0):
+ self.value = value
+ self.mode = mode
+ self.ttl = ttl
+ self.exp = exp
+
+ def encode(self):
+ is_uniform = 0 if self.mode is MplsLspMode.PIPE else 1
+ return {'label': self.value,
+ 'ttl': self.ttl,
+ 'exp': self.exp,
+ 'is_uniform': is_uniform}
+
+
class VppRoutePath(object):
def __init__(
@@ -138,6 +158,16 @@ class VppRoutePath(object):
self.next_hop_id = next_hop_id
self.is_dvr = is_dvr
+ def encode_labels(self):
+ lstack = []
+ for l in self.nh_labels:
+ if type(l) == VppMplsLabel:
+ lstack.append(l.encode())
+ else:
+ lstack.append({'label': l,
+ 'ttl': 255})
+ return lstack
+
class VppMRoutePath(VppRoutePath):
@@ -195,15 +225,16 @@ class VppIpRoute(VppObject):
is_ipv6=self.is_ip6)
else:
for path in self.paths:
+ lstack = path.encode_labels()
+
self._test.vapi.ip_add_del_route(
self.dest_addr,
self.dest_addr_len,
path.nh_addr,
path.nh_itf,
table_id=self.table_id,
- next_hop_out_label_stack=path.nh_labels,
- next_hop_n_out_labels=len(
- path.nh_labels),
+ next_hop_out_label_stack=lstack,
+ next_hop_n_out_labels=len(lstack),
next_hop_via_label=path.nh_via_label,
next_hop_table_id=path.nh_table_id,
next_hop_id=path.next_hop_id,
@@ -513,6 +544,8 @@ class VppMplsRoute(VppObject):
def add_vpp_config(self):
is_multipath = len(self.paths) > 1
for path in self.paths:
+ lstack = path.encode_labels()
+
self._test.vapi.mpls_route_add_del(
self.local_label,
self.eos_bit,
@@ -524,9 +557,8 @@ class VppMplsRoute(VppObject):
table_id=self.table_id,
is_interface_rx=path.is_interface_rx,
is_rpf_id=path.is_rpf_id,
- next_hop_out_label_stack=path.nh_labels,
- next_hop_n_out_labels=len(
- path.nh_labels),
+ next_hop_out_label_stack=lstack,
+ next_hop_n_out_labels=len(lstack),
next_hop_via_label=path.nh_via_label,
next_hop_table_id=path.nh_table_id)
self._test.registry.register(self, self._test.logger)
diff --git a/test/vpp_mpls_tunnel_interface.py b/test/vpp_mpls_tunnel_interface.py
index 0542b05c05f..c789c3f1dd0 100644
--- a/test/vpp_mpls_tunnel_interface.py
+++ b/test/vpp_mpls_tunnel_interface.py
@@ -1,6 +1,6 @@
from vpp_interface import VppInterface
-from vpp_ip_route import VppRoutePath
+from vpp_ip_route import VppRoutePath, VppMplsLabel
import socket
@@ -21,6 +21,8 @@ class VppMPLSTunnelInterface(VppInterface):
def add_vpp_config(self):
self._sw_if_index = 0xffffffff
for path in self.t_paths:
+ lstack = path.encode_labels()
+
reply = self.test.vapi.mpls_tunnel_add_del(
self._sw_if_index,
1, # IPv4 next-hop
@@ -28,8 +30,8 @@ class VppMPLSTunnelInterface(VppInterface):
path.nh_itf,
path.nh_table_id,
path.weight,
- next_hop_out_label_stack=path.nh_labels,
- next_hop_n_out_labels=len(path.nh_labels),
+ next_hop_out_label_stack=lstack,
+ next_hop_n_out_labels=len(lstack),
is_multicast=self.is_multicast,
l2_only=self.is_l2)
self._sw_if_index = reply.sw_if_index
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 99e320bfe60..18bb1f60d40 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -1119,7 +1119,6 @@ class VppPapiProvider(object):
:param next_hop_weight: (Default value = 1)
"""
-
return self.api(
self.papi.mpls_route_add_del,
{'mr_label': label,
@@ -2875,25 +2874,14 @@ class VppPapiProvider(object):
paths,
is_add=1):
""" BIER Route add/del """
- br_paths = []
- for p in paths:
- br_paths.append({'next_hop': p.nh_addr,
- 'weight': 1,
- 'afi': 0,
- 'preference': 0,
- 'table_id': p.nh_table_id,
- 'next_hop_id': p.next_hop_id,
- 'is_udp_encap': p.is_udp_encap,
- 'n_labels': len(p.nh_labels),
- 'label_stack': p.nh_labels})
return self.api(
self.papi.bier_route_add_del,
{'br_tbl_id': {"bt_set": bti.set_id,
"bt_sub_domain": bti.sub_domain_id,
"bt_hdr_len_id": bti.hdr_len_id},
'br_bp': bp,
- 'br_n_paths': len(br_paths),
- 'br_paths': br_paths,
+ 'br_n_paths': len(paths),
+ 'br_paths': paths,
'br_is_add': is_add})
def bier_route_dump(self, bti):
@@ -2950,6 +2938,9 @@ class VppPapiProvider(object):
next_hop_is_ip4=1,
is_add=1):
""" BIER Route add/del """
+ lstack = []
+ while (len(lstack) < 16):
+ lstack.append({})
return self.api(
self.papi.bier_disp_entry_add_del,
{'bde_tbl_id': bdti,
@@ -2961,7 +2952,7 @@ class VppPapiProvider(object):
'afi': next_hop_afi,
'rpf_id': next_hop_rpf_id,
'n_labels': 0,
- 'label_stack': [0]}],
+ 'label_stack': lstack}],
'bde_is_add': is_add})
def bier_disp_entry_dump(self, bdti):