author    | marek zavodsky <mazavods@gmail.com> | 2016-06-21 05:35:16 +0200
committer | Damjan Marion <damarion@cisco.com>  | 2016-06-23 14:22:12 +0000
commit    | 2c21a9aa887d52840bfefe60ca36a4a6941217b0 (patch)
tree      | 48f2223fd15e2e1823355482f4538c5ea96926bb
parent    | 324112fad06e0461958f22827f944595466e1891 (diff)
VPP-72 Added API call equivalents for:
"show mpls fib" -> "mpls_fib_encap_dump" and "mpls_fib_decap_dump"
"show mpls tunnel" -> "mpls_eth_tunnel_dump [tunnel_index <tunnel-id>]" and
"mpls_gre_tunnel_dump [tunnel_index <tunnel-id>]"
Change-Id: I59699039392f06dc61f62a015d07186a91cfaf45
Signed-off-by: marek zavodsky <mazavods@gmail.com>
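
For orientation, every new VAT dump command added here follows the same request / control-ping / wait pattern. The fragment below is a minimal sketch of that pattern, condensed from the api_mpls_gre_tunnel_dump handler this patch adds to vpp-api-test/vat/api_format.c; it assumes the usual VAT environment (vat_main_t, the M/S/W message macros and a registered MPLS_GRE_TUNNEL_DETAILS reply handler), so it is illustrative rather than a standalone program.

/* Sketch of the VAT dump pattern used by the new calls in this patch
 * (condensed from api_mpls_gre_tunnel_dump). Assumes the standard VAT
 * M/S/W macros and generated message types. */
static int api_mpls_gre_tunnel_dump_sketch (vat_main_t * vam)
{
    vl_api_mpls_gre_tunnel_dump_t * mp;
    f64 timeout;                       /* used by the W (wait) macro */
    i32 index = -1;                    /* -1 requests all tunnels */

    M(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump);
    mp->tunnel_index = htonl (index);
    S;                                 /* send the dump request */

    /* A control ping marks the end of the details stream */
    {
        vl_api_control_ping_t * mp;
        M(CONTROL_PING, control_ping);
        S;
    }
    W;                                 /* wait for the replies / ping */
}

From the vpp_api_test prompt the request is issued as "mpls_gre_tunnel_dump tunnel_index <tunnel-id>", or with no argument to dump every tunnel; mpls_eth_tunnel_dump takes the same optional argument, while mpls_fib_encap_dump and mpls_fib_decap_dump take none.
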
-rw-r--r-- | vnet/vnet/mpls-gre/mpls.c     |  14
-rw-r--r-- | vnet/vnet/mpls-gre/mpls.h     |  17
-rw-r--r-- | vpp-api-test/vat/api_format.c | 311
-rw-r--r-- | vpp/api/api.c                 | 359
-rw-r--r-- | vpp/api/custom_dump.c         |  50
-rw-r--r-- | vpp/api/vpe.api               | 138
6 files changed, 874 insertions(+), 15 deletions(-)
diff --git a/vnet/vnet/mpls-gre/mpls.c b/vnet/vnet/mpls-gre/mpls.c index 067f799c611..4b36c5dc34d 100644 --- a/vnet/vnet/mpls-gre/mpls.c +++ b/vnet/vnet/mpls-gre/mpls.c @@ -577,15 +577,7 @@ VLIB_CLI_COMMAND (mpls_del_decap_command, static) = { .function = mpls_del_decap_command_fn, }; -typedef struct { - u32 fib_index; - u32 entry_index; - u32 dest; - u32 s_bit; - u32 label; -} show_mpls_fib_t; - -static int +int mpls_dest_cmp(void * a1, void * a2) { show_mpls_fib_t * r1 = a1; @@ -594,7 +586,7 @@ mpls_dest_cmp(void * a1, void * a2) return clib_net_to_host_u32(r1->dest) - clib_net_to_host_u32(r2->dest); } -static int +int mpls_fib_index_cmp(void * a1, void * a2) { show_mpls_fib_t * r1 = a1; @@ -603,7 +595,7 @@ mpls_fib_index_cmp(void * a1, void * a2) return r1->fib_index - r2->fib_index; } -static int +int mpls_label_cmp(void * a1, void * a2) { show_mpls_fib_t * r1 = a1; diff --git a/vnet/vnet/mpls-gre/mpls.h b/vnet/vnet/mpls-gre/mpls.h index cd487b2f59d..d8ffca22793 100644 --- a/vnet/vnet/mpls-gre/mpls.h +++ b/vnet/vnet/mpls-gre/mpls.h @@ -229,4 +229,21 @@ typedef struct { u8 * format_mpls_eth_tx_trace (u8 * s, va_list * args); +typedef struct { + u32 fib_index; + u32 entry_index; + u32 dest; + u32 s_bit; + u32 label; +} show_mpls_fib_t; + +int +mpls_dest_cmp(void * a1, void * a2); + +int +mpls_fib_index_cmp(void * a1, void * a2); + +int +mpls_label_cmp(void * a1, void * a2); + #endif /* included_vnet_mpls_gre_h */ diff --git a/vpp-api-test/vat/api_format.c b/vpp-api-test/vat/api_format.c index c992d19758c..48e924a9e8e 100644 --- a/vpp-api-test/vat/api_format.c +++ b/vpp-api-test/vat/api_format.c @@ -34,6 +34,7 @@ #include <vnet/l2/l2_classify.h> #include <vnet/l2/l2_vtr.h> #include <vnet/classify/input_acl.h> +#include <vnet/mpls-gre/mpls.h> #if DPDK > 0 #include <vnet/ipsec/ipsec.h> #include <vnet/ipsec/ikev2.h> @@ -2647,7 +2648,11 @@ _(AF_PACKET_DELETE_REPLY, af_packet_delete_reply) \ _(POLICER_ADD_DEL_REPLY, policer_add_del_reply) \ _(POLICER_DETAILS, policer_details) \ _(NETMAP_CREATE_REPLY, netmap_create_reply) \ -_(NETMAP_DELETE_REPLY, netmap_delete_reply) +_(NETMAP_DELETE_REPLY, netmap_delete_reply) \ +_(MPLS_GRE_TUNNEL_DETAILS, mpls_gre_tunnel_details) \ +_(MPLS_ETH_TUNNEL_DETAILS, mpls_eth_tunnel_details) \ +_(MPLS_FIB_ENCAP_DETAILS, mpls_fib_encap_details) \ +_(MPLS_FIB_DECAP_DETAILS, mpls_fib_decap_details) /* M: construct, but don't yet send a message */ @@ -11019,6 +11024,304 @@ api_netmap_delete (vat_main_t * vam) return 0; } +static void vl_api_mpls_gre_tunnel_details_t_handler +(vl_api_mpls_gre_tunnel_details_t * mp) +{ + vat_main_t * vam = &vat_main; + i32 i; + i32 len = ntohl(mp->nlabels); + + if (mp->l2_only == 0) { + fformat(vam->ofp, "[%d]: src %U, dst %U, adj %U/%d, labels ", + ntohl(mp->tunnel_index), + format_ip4_address, &mp->tunnel_src, + format_ip4_address, &mp->tunnel_dst, + format_ip4_address, &mp->intfc_address, + ntohl(mp->mask_width)); + for (i = 0; i < len; i++) { + fformat(vam->ofp, "%u ", ntohl(mp->labels[i])); + } + fformat(vam->ofp, "\n"); + fformat(vam->ofp, " inner fib index %d, outer fib index %d\n", + ntohl(mp->inner_fib_index), ntohl(mp->outer_fib_index)); + } else { + fformat(vam->ofp, "[%d]: src %U, dst %U, key %U, labels ", + ntohl(mp->tunnel_index), + format_ip4_address, &mp->tunnel_src, + format_ip4_address, &mp->tunnel_dst, + format_ip4_address, &mp->intfc_address); + for (i = 0; i < len; i++) { + fformat(vam->ofp, "%u ", ntohl(mp->labels[i])); + } + fformat(vam->ofp, "\n"); + fformat(vam->ofp, " l2 interface %d, outer fib index %d\n", + 
ntohl(mp->hw_if_index), ntohl(mp->outer_fib_index)); + } +} + +static void vl_api_mpls_gre_tunnel_details_t_handler_json +(vl_api_mpls_gre_tunnel_details_t * mp) +{ + vat_main_t * vam = &vat_main; + vat_json_node_t *node = NULL; + struct in_addr ip4; + i32 i; + i32 len = ntohl(mp->nlabels); + + if (VAT_JSON_ARRAY != vam->json_tree.type) { + ASSERT(VAT_JSON_NONE == vam->json_tree.type); + vat_json_init_array(&vam->json_tree); + } + node = vat_json_array_add(&vam->json_tree); + + vat_json_init_object(node); + vat_json_object_add_uint(node, "tunnel_index", ntohl(mp->tunnel_index)); + clib_memcpy(&ip4, &(mp->intfc_address), sizeof(ip4)); + vat_json_object_add_ip4(node, "intfc_address", ip4); + vat_json_object_add_uint(node, "inner_fib_index", ntohl(mp->inner_fib_index)); + vat_json_object_add_uint(node, "mask_width", ntohl(mp->mask_width)); + vat_json_object_add_uint(node, "encap_index", ntohl(mp->encap_index)); + vat_json_object_add_uint(node, "hw_if_index", ntohl(mp->hw_if_index)); + vat_json_object_add_uint(node, "l2_only", ntohl(mp->l2_only)); + clib_memcpy(&ip4, &(mp->tunnel_src), sizeof(ip4)); + vat_json_object_add_ip4(node, "tunnel_src", ip4); + clib_memcpy(&ip4, &(mp->tunnel_dst), sizeof(ip4)); + vat_json_object_add_ip4(node, "tunnel_dst", ip4); + vat_json_object_add_uint(node, "outer_fib_index", ntohl(mp->outer_fib_index)); + vat_json_object_add_uint(node, "label_count", len); + for (i = 0; i < len; i++) { + vat_json_object_add_uint(node, "label", ntohl(mp->labels[i])); + } +} + +static int api_mpls_gre_tunnel_dump (vat_main_t * vam) +{ + vl_api_mpls_gre_tunnel_dump_t *mp; + f64 timeout; + i32 index = -1; + + /* Parse args required to build the message */ + while (unformat_check_input (vam->input) != UNFORMAT_END_OF_INPUT) { + if (!unformat (vam->input, "tunnel_index %d", &index)) { + index = -1; + break; + } + } + + fformat(vam->ofp, " tunnel_index %d\n", index); + + M(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump); + mp->tunnel_index = htonl(index); + S; + + /* Use a control ping for synchronization */ + { + vl_api_control_ping_t * mp; + M(CONTROL_PING, control_ping); + S; + } + W; +} + +static void vl_api_mpls_eth_tunnel_details_t_handler +(vl_api_mpls_eth_tunnel_details_t * mp) +{ + vat_main_t * vam = &vat_main; + i32 i; + i32 len = ntohl(mp->nlabels); + + fformat(vam->ofp, "[%d]: dst %U, adj %U/%d, labels ", + ntohl(mp->tunnel_index), + format_ethernet_address, &mp->tunnel_dst_mac, + format_ip4_address, &mp->intfc_address, + ntohl(mp->mask_width)); + for (i = 0; i < len; i++) { + fformat(vam->ofp, "%u ", ntohl(mp->labels[i])); + } + fformat(vam->ofp, "\n"); + fformat(vam->ofp, " tx on %d, rx fib index %d\n", + ntohl(mp->tx_sw_if_index), + ntohl(mp->inner_fib_index)); +} + +static void vl_api_mpls_eth_tunnel_details_t_handler_json +(vl_api_mpls_eth_tunnel_details_t * mp) +{ + vat_main_t * vam = &vat_main; + vat_json_node_t *node = NULL; + struct in_addr ip4; + i32 i; + i32 len = ntohl(mp->nlabels); + + if (VAT_JSON_ARRAY != vam->json_tree.type) { + ASSERT(VAT_JSON_NONE == vam->json_tree.type); + vat_json_init_array(&vam->json_tree); + } + node = vat_json_array_add(&vam->json_tree); + + vat_json_init_object(node); + vat_json_object_add_uint(node, "tunnel_index", ntohl(mp->tunnel_index)); + clib_memcpy(&ip4, &(mp->intfc_address), sizeof(ip4)); + vat_json_object_add_ip4(node, "intfc_address", ip4); + vat_json_object_add_uint(node, "inner_fib_index", ntohl(mp->inner_fib_index)); + vat_json_object_add_uint(node, "mask_width", ntohl(mp->mask_width)); + vat_json_object_add_uint(node, 
"encap_index", ntohl(mp->encap_index)); + vat_json_object_add_uint(node, "hw_if_index", ntohl(mp->hw_if_index)); + vat_json_object_add_uint(node, "l2_only", ntohl(mp->l2_only)); + vat_json_object_add_string_copy(node, "tunnel_dst_mac", + format(0, "%U", format_ethernet_address, &mp->tunnel_dst_mac)); + vat_json_object_add_uint(node, "tx_sw_if_index", ntohl(mp->tx_sw_if_index)); + vat_json_object_add_uint(node, "label_count", len); + for (i = 0; i < len; i++) { + vat_json_object_add_uint(node, "label", ntohl(mp->labels[i])); + } +} + +static int api_mpls_eth_tunnel_dump (vat_main_t * vam) +{ + vl_api_mpls_eth_tunnel_dump_t *mp; + f64 timeout; + i32 index = -1; + + /* Parse args required to build the message */ + while (unformat_check_input (vam->input) != UNFORMAT_END_OF_INPUT) { + if (!unformat (vam->input, "tunnel_index %d", &index)) { + index = -1; + break; + } + } + + fformat(vam->ofp, " tunnel_index %d\n", index); + + M(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump); + mp->tunnel_index = htonl(index); + S; + + /* Use a control ping for synchronization */ + { + vl_api_control_ping_t * mp; + M(CONTROL_PING, control_ping); + S; + } + W; +} + +static void vl_api_mpls_fib_encap_details_t_handler +(vl_api_mpls_fib_encap_details_t * mp) +{ + vat_main_t * vam = &vat_main; + i32 i; + i32 len = ntohl(mp->nlabels); + + fformat(vam->ofp, "table %d, dest %U, label ", + ntohl(mp->fib_index), + format_ip4_address, &mp->dest, + len); + for (i = 0; i < len; i++) { + fformat(vam->ofp, "%u ", ntohl(mp->labels[i])); + } + fformat(vam->ofp, "\n"); +} + +static void vl_api_mpls_fib_encap_details_t_handler_json +(vl_api_mpls_fib_encap_details_t * mp) +{ + vat_main_t * vam = &vat_main; + vat_json_node_t *node = NULL; + i32 i; + i32 len = ntohl(mp->nlabels); + struct in_addr ip4; + + if (VAT_JSON_ARRAY != vam->json_tree.type) { + ASSERT(VAT_JSON_NONE == vam->json_tree.type); + vat_json_init_array(&vam->json_tree); + } + node = vat_json_array_add(&vam->json_tree); + + vat_json_init_object(node); + vat_json_object_add_uint(node, "table", ntohl(mp->fib_index)); + vat_json_object_add_uint(node, "entry_index", ntohl(mp->entry_index)); + clib_memcpy(&ip4, &(mp->dest), sizeof(ip4)); + vat_json_object_add_ip4(node, "dest", ip4); + vat_json_object_add_uint(node, "s_bit", ntohl(mp->s_bit)); + vat_json_object_add_uint(node, "label_count", len); + for (i = 0; i < len; i++) { + vat_json_object_add_uint(node, "label", ntohl(mp->labels[i])); + } +} + +static int api_mpls_fib_encap_dump (vat_main_t * vam) +{ + vl_api_mpls_fib_encap_dump_t *mp; + f64 timeout; + + M(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump); + S; + + /* Use a control ping for synchronization */ + { + vl_api_control_ping_t * mp; + M(CONTROL_PING, control_ping); + S; + } + W; +} + +static void vl_api_mpls_fib_decap_details_t_handler +(vl_api_mpls_fib_decap_details_t * mp) +{ + vat_main_t * vam = &vat_main; + + fformat(vam->ofp, "RX table %d, TX table/intfc %u, swif_tag '%s', label %u, s_bit %u\n", + ntohl(mp->rx_table_id), + ntohl(mp->tx_table_id), + mp->swif_tag, + ntohl(mp->label), + ntohl(mp->s_bit)); +} + +static void vl_api_mpls_fib_decap_details_t_handler_json +(vl_api_mpls_fib_decap_details_t * mp) +{ + vat_main_t * vam = &vat_main; + vat_json_node_t *node = NULL; + struct in_addr ip4; + + if (VAT_JSON_ARRAY != vam->json_tree.type) { + ASSERT(VAT_JSON_NONE == vam->json_tree.type); + vat_json_init_array(&vam->json_tree); + } + node = vat_json_array_add(&vam->json_tree); + + vat_json_init_object(node); + vat_json_object_add_uint(node, "table", 
ntohl(mp->fib_index)); + vat_json_object_add_uint(node, "entry_index", ntohl(mp->entry_index)); + clib_memcpy(&ip4, &(mp->dest), sizeof(ip4)); + vat_json_object_add_ip4(node, "dest", ip4); + vat_json_object_add_uint(node, "s_bit", ntohl(mp->s_bit)); + vat_json_object_add_uint(node, "label", ntohl(mp->label)); + vat_json_object_add_uint(node, "rx_table_id", ntohl(mp->rx_table_id)); + vat_json_object_add_uint(node, "tx_table_id", ntohl(mp->tx_table_id)); + vat_json_object_add_string_copy(node, "swif_tag", mp->swif_tag); +} + +static int api_mpls_fib_decap_dump (vat_main_t * vam) +{ + vl_api_mpls_fib_decap_dump_t *mp; + f64 timeout; + + M(MPLS_FIB_DECAP_DUMP, mpls_fib_decap_dump); + S; + + /* Use a control ping for synchronization */ + { + vl_api_control_ping_t * mp; + M(CONTROL_PING, control_ping); + S; + } + W; +} + static int q_or_quit (vat_main_t * vam) { longjmp (vam->jump_buf, 1); @@ -11515,7 +11818,11 @@ _(policer_add_del, "name <policer name> <params> [del]") \ _(policer_dump, "[name <policer name>]") \ _(netmap_create, "name <interface name> [hw-addr <mac>] [pipe] " \ "[master|slave]") \ -_(netmap_delete, "name <interface name>") +_(netmap_delete, "name <interface name>") \ +_(mpls_gre_tunnel_dump, "tunnel_index <tunnel-id>") \ +_(mpls_eth_tunnel_dump, "tunnel_index <tunnel-id>") \ +_(mpls_fib_encap_dump, "") \ +_(mpls_fib_decap_dump, "") /* List of command functions, CLI names map directly to functions */ #define foreach_cli_function \ diff --git a/vpp/api/api.c b/vpp/api/api.c index 54545cc0281..1550dc89a97 100644 --- a/vpp/api/api.c +++ b/vpp/api/api.c @@ -351,7 +351,15 @@ _(AF_PACKET_DELETE, af_packet_delete) \ _(POLICER_ADD_DEL, policer_add_del) \ _(POLICER_DUMP, policer_dump) \ _(NETMAP_CREATE, netmap_create) \ -_(NETMAP_DELETE, netmap_delete) +_(NETMAP_DELETE, netmap_delete) \ +_(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump) \ +_(MPLS_GRE_TUNNEL_DETAILS, mpls_gre_tunnel_details) \ +_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \ +_(MPLS_ETH_TUNNEL_DETAILS, mpls_eth_tunnel_details) \ +_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \ +_(MPLS_FIB_ENCAP_DETAILS, mpls_fib_encap_details) \ +_(MPLS_FIB_DECAP_DUMP, mpls_fib_decap_dump) \ +_(MPLS_FIB_DECAP_DETAILS, mpls_fib_decap_details) #define QUOTE_(x) #x #define QUOTE(x) QUOTE_(x) @@ -6304,6 +6312,355 @@ vl_api_netmap_delete_t_handler REPLY_MACRO(VL_API_NETMAP_DELETE_REPLY); } +static void vl_api_mpls_gre_tunnel_details_t_handler ( + vl_api_mpls_gre_tunnel_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void send_mpls_gre_tunnel_entry (vpe_api_main_t * am, + unix_shared_memory_queue_t *q, + mpls_gre_tunnel_t * gt, + u32 index, + u32 context) +{ + vl_api_mpls_gre_tunnel_details_t * mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs(VL_API_MPLS_GRE_TUNNEL_DETAILS); + mp->context = context; + + if (gt != NULL) { + mp->tunnel_index = htonl(index); + mp->tunnel_src = gt->tunnel_src.as_u32; + mp->tunnel_dst = gt->tunnel_dst.as_u32; + mp->intfc_address = gt->intfc_address.as_u32; + mp->mask_width = htonl(gt->mask_width); + mp->inner_fib_index = htonl(gt->inner_fib_index); + mp->outer_fib_index = htonl(gt->outer_fib_index); + mp->encap_index = htonl(gt->encap_index); + mp->hw_if_index = htonl(gt->hw_if_index); + mp->l2_only = htonl(gt->l2_only); + } + + mpls_main_t * mm = &mpls_main; + mpls_encap_t * e; + int i; + u32 len = 0; + + e = pool_elt_at_index (mm->encaps, gt->encap_index); + len = vec_len (e->labels); + mp->nlabels = htonl(len); + + for (i = 0; i < len; i++) { + 
mp->labels[i] = htonl(vnet_mpls_uc_get_label( + clib_host_to_net_u32(e->labels[i].label_exp_s_ttl))); + } + + vl_msg_api_send_shmem (q, (u8 *)&mp); +} + +static void +vl_api_mpls_gre_tunnel_dump_t_handler (vl_api_mpls_gre_tunnel_dump_t *mp) +{ + vpe_api_main_t * am = &vpe_api_main; + unix_shared_memory_queue_t * q; + vlib_main_t * vm = &vlib_global_main; + mpls_main_t * mm = &mpls_main; + mpls_gre_tunnel_t * gt; + u32 index = ntohl(mp->tunnel_index); + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + if (pool_elts (mm->gre_tunnels)) { + if(mp->tunnel_index >= 0) { + vlib_cli_output (vm, "MPLS-GRE tunnel %u", index); + gt = pool_elt_at_index (mm->gre_tunnels, index); + send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context); + } else { + vlib_cli_output (vm, "MPLS-GRE tunnels"); + pool_foreach (gt, mm->gre_tunnels, + ({ + send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context); + })); + } + } else { + vlib_cli_output (vm, "No MPLS-GRE tunnels"); + } +} + +static void vl_api_mpls_eth_tunnel_details_t_handler ( + vl_api_mpls_eth_tunnel_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void send_mpls_eth_tunnel_entry (vpe_api_main_t * am, + unix_shared_memory_queue_t *q, + mpls_eth_tunnel_t * et, + u32 index, + u32 context) +{ + vl_api_mpls_eth_tunnel_details_t * mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs(VL_API_MPLS_ETH_TUNNEL_DETAILS); + mp->context = context; + + if (et != NULL) { + mp->tunnel_index = htonl(index); + memcpy(mp->tunnel_dst_mac, et->tunnel_dst, 6); + mp->intfc_address = et->intfc_address.as_u32; + mp->tx_sw_if_index = htonl(et->tx_sw_if_index); + mp->inner_fib_index = htonl(et->inner_fib_index); + mp->mask_width = htonl(et->mask_width); + mp->encap_index = htonl(et->encap_index); + mp->hw_if_index = htonl(et->hw_if_index); + mp->l2_only = htonl(et->l2_only); + } + + mpls_main_t * mm = &mpls_main; + mpls_encap_t * e; + int i; + u32 len = 0; + + e = pool_elt_at_index (mm->encaps, et->encap_index); + len = vec_len (e->labels); + mp->nlabels = htonl(len); + + for (i = 0; i < len; i++) { + mp->labels[i] = htonl(vnet_mpls_uc_get_label( + clib_host_to_net_u32(e->labels[i].label_exp_s_ttl))); + } + + vl_msg_api_send_shmem (q, (u8 *)&mp); +} + +static void +vl_api_mpls_eth_tunnel_dump_t_handler (vl_api_mpls_eth_tunnel_dump_t *mp) +{ + vpe_api_main_t * am = &vpe_api_main; + unix_shared_memory_queue_t * q; + vlib_main_t * vm = &vlib_global_main; + mpls_main_t * mm = &mpls_main; + mpls_eth_tunnel_t * et; + u32 index = ntohl(mp->tunnel_index); + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + clib_warning("Received mpls_eth_tunnel_dump"); + clib_warning("Received tunnel index: %u from client %u", index, mp->client_index); + + if (pool_elts (mm->eth_tunnels)) { + if(mp->tunnel_index >= 0) { + vlib_cli_output (vm, "MPLS-Ethernet tunnel %u", index); + et = pool_elt_at_index (mm->eth_tunnels, index); + send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context); + } else { + clib_warning("MPLS-Ethernet tunnels"); + pool_foreach (et, mm->eth_tunnels, + ({ + send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context); + })); + } + } else { + clib_warning("No MPLS-Ethernet tunnels"); + } +} + +static void vl_api_mpls_fib_encap_details_t_handler ( + vl_api_mpls_fib_encap_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void send_mpls_fib_encap_details (vpe_api_main_t * am, + 
unix_shared_memory_queue_t *q, + show_mpls_fib_t *s, + u32 context) +{ + vl_api_mpls_fib_encap_details_t * mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_ENCAP_DETAILS); + mp->context = context; + + mp->fib_index = htonl(s->fib_index); + mp->entry_index = htonl(s->entry_index); + mp->dest = s->dest; + mp->s_bit = htonl(s->s_bit); + + mpls_main_t * mm = &mpls_main; + mpls_encap_t * e; + int i; + u32 len = 0; + + e = pool_elt_at_index (mm->encaps, s->entry_index); + len = vec_len (e->labels); + mp->nlabels = htonl(len); + + for (i = 0; i < len; i++) { + mp->labels[i] = htonl(vnet_mpls_uc_get_label( + clib_host_to_net_u32(e->labels[i].label_exp_s_ttl))); + } + + vl_msg_api_send_shmem (q, (u8 *)&mp); +} + +static void +vl_api_mpls_fib_encap_dump_t_handler (vl_api_mpls_fib_encap_dump_t *mp) +{ + vpe_api_main_t * am = &vpe_api_main; + unix_shared_memory_queue_t * q; + vlib_main_t * vm = &vlib_global_main; + u64 key; + u32 value; + show_mpls_fib_t *records = 0; + show_mpls_fib_t *s; + mpls_main_t * mm = &mpls_main; + ip4_main_t * im = &ip4_main; + ip4_fib_t * rx_fib; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest, + ({ + vec_add2 (records, s, 1); + s->fib_index = (u32)(key>>32); + s->dest = (u32)(key & 0xFFFFFFFF); + s->entry_index = (u32) value; + })); + + if (0 == vec_len(records)) { + vlib_cli_output(vm, "MPLS encap table empty"); + goto out; + } + + /* sort output by dst address within fib */ + vec_sort_with_function(records, mpls_dest_cmp); + vec_sort_with_function(records, mpls_fib_index_cmp); + vlib_cli_output(vm, "MPLS encap table"); + vlib_cli_output(vm, "%=6s%=16s%=16s", "Table", "Dest address", "Labels"); + vec_foreach (s, records) + { + rx_fib = vec_elt_at_index(im->fibs, s->fib_index); + vlib_cli_output(vm, "%=6d%=16U%=16U", rx_fib->table_id, + format_ip4_address, &s->dest, format_mpls_encap_index, mm, + s->entry_index); + send_mpls_fib_encap_details (am, q, s, mp->context); + } + +out: + vec_free(records); +} + +static void vl_api_mpls_fib_decap_details_t_handler ( + vl_api_mpls_fib_decap_details_t * mp) +{ + clib_warning ("BUG"); +} + +static void send_mpls_fib_decap_details (vpe_api_main_t * am, + unix_shared_memory_queue_t *q, + show_mpls_fib_t *s, + u32 rx_table_id, + u32 tx_table_id, + char *swif_tag, + u32 context) +{ + vl_api_mpls_fib_decap_details_t * mp; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_DECAP_DETAILS); + mp->context = context; + + mp->fib_index = htonl(s->fib_index); + mp->entry_index = htonl(s->entry_index); + mp->dest = s->dest; + mp->s_bit = htonl(s->s_bit); + mp->label = htonl(s->label); + mp->rx_table_id = htonl(rx_table_id); + mp->tx_table_id = htonl(tx_table_id); + strncpy ((char *) mp->swif_tag, + (char *) swif_tag, ARRAY_LEN(mp->swif_tag)-1); + + vl_msg_api_send_shmem (q, (u8 *)&mp); +} + +static void +vl_api_mpls_fib_decap_dump_t_handler (vl_api_mpls_fib_decap_dump_t *mp) +{ + vpe_api_main_t * am = &vpe_api_main; + unix_shared_memory_queue_t * q; + vlib_main_t * vm = &vlib_global_main; + u64 key; + u32 value; + show_mpls_fib_t *records = 0; + show_mpls_fib_t *s; + mpls_main_t * mm = &mpls_main; + ip4_main_t * im = &ip4_main; + ip4_fib_t * rx_fib; + ip4_fib_t *tx_fib; + u32 tx_table_id; + char *swif_tag; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + return; + + hash_foreach (key, 
value, mm->mpls_decap_by_rx_fib_and_label, + ({ + vec_add2 (records, s, 1); + s->fib_index = (u32)(key>>32); + s->entry_index = (u32) value; + s->label = ((u32) key)>>12; + s->s_bit = (key & (1<<8)) != 0; + })); + + if (!vec_len(records)) { + vlib_cli_output(vm, "MPLS decap table empty"); + goto out; + } + + vec_sort_with_function(records, mpls_label_cmp); + vlib_cli_output(vm, "MPLS decap table"); + vlib_cli_output(vm, "%=10s%=15s%=6s%=6s", "RX Table", "TX Table/Intfc", + "Label", "S-bit"); + vec_foreach (s, records) + { + mpls_decap_t * d; + d = pool_elt_at_index(mm->decaps, s->entry_index); + if (d->next_index == MPLS_INPUT_NEXT_IP4_INPUT) { + tx_fib = vec_elt_at_index(im->fibs, d->tx_fib_index); + tx_table_id = tx_fib->table_id; + swif_tag = " "; + } else { + tx_table_id = d->tx_fib_index; + swif_tag = "(i) "; + } + rx_fib = vec_elt_at_index(im->fibs, s->fib_index); + + vlib_cli_output(vm, "%=10d%=10d%=5s%=6d%=6d", rx_fib->table_id, + tx_table_id, swif_tag, s->label, s->s_bit); + + send_mpls_fib_decap_details (am, q, s, rx_fib->table_id, + tx_table_id, swif_tag, mp->context); + } + +out: + vec_free(records); +} + #define BOUNCE_HANDLER(nn) \ static void vl_api_##nn##_t_handler ( \ vl_api_##nn##_t *mp) \ diff --git a/vpp/api/custom_dump.c b/vpp/api/custom_dump.c index 388c7e50173..fa9dce6e5ee 100644 --- a/vpp/api/custom_dump.c +++ b/vpp/api/custom_dump.c @@ -1755,6 +1755,50 @@ static void *vl_api_sw_interface_clear_stats_t_print FINISH; } +static void *vl_api_mpls_gre_tunnel_dump_t_print +(vl_api_mpls_gre_tunnel_dump_t * mp, void *handle) +{ + u8 * s; + + s = format (0, "SCRIPT: mpls_gre_tunnel_dump "); + + s = format (s, "tunnel_index %d ", ntohl(mp->tunnel_index)); + + FINISH; +} + +static void *vl_api_mpls_eth_tunnel_dump_t_print +(vl_api_mpls_eth_tunnel_dump_t * mp, void *handle) +{ + u8 * s; + + s = format (0, "SCRIPT: mpls_eth_tunnel_dump "); + + s = format (s, "tunnel_index %d ", ntohl(mp->tunnel_index)); + + FINISH; +} + +static void *vl_api_mpls_fib_encap_dump_t_print +(vl_api_mpls_fib_encap_dump_t * mp, void *handle) +{ + u8 * s; + + s = format (0, "SCRIPT: mpls_fib_encap_dump "); + + FINISH; +} + +static void *vl_api_mpls_fib_decap_dump_t_print +(vl_api_mpls_fib_decap_dump_t * mp, void *handle) +{ + u8 * s; + + s = format (0, "SCRIPT: mpls_fib_decap_dump "); + + FINISH; +} + #define foreach_custom_print_function \ _(CREATE_LOOPBACK, create_loopback) \ _(SW_INTERFACE_SET_FLAGS, sw_interface_set_flags) \ @@ -1842,7 +1886,11 @@ _(COP_INTERFACE_ENABLE_DISABLE, cop_interface_enable_disable) \ _(COP_WHITELIST_ENABLE_DISABLE, cop_whitelist_enable_disable) \ _(AF_PACKET_CREATE, af_packet_create) \ _(AF_PACKET_DELETE, af_packet_delete) \ -_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) +_(SW_INTERFACE_CLEAR_STATS, sw_interface_clear_stats) \ +_(MPLS_GRE_TUNNEL_DUMP, mpls_gre_tunnel_dump) \ +_(MPLS_ETH_TUNNEL_DUMP, mpls_eth_tunnel_dump) \ +_(MPLS_FIB_ENCAP_DUMP, mpls_fib_encap_dump) \ +_(MPLS_FIB_DECAP_DUMP, mpls_fib_decap_dump) void vl_msg_api_custom_dump_configure (api_main_t *am) { diff --git a/vpp/api/vpe.api b/vpp/api/vpe.api index 28cba3d7d11..24bd0ae6865 100644 --- a/vpp/api/vpe.api +++ b/vpp/api/vpe.api @@ -3697,3 +3697,141 @@ define netmap_delete_reply { u32 context; i32 retval; }; + +/** \brief Dump mpls gre tunnel table + @param client_index - opaque cookie to identify the sender + @param tunnel_index - gre tunnel identifier or -1 in case of all tunnels +*/ +define mpls_gre_tunnel_dump { + u32 client_index; + u32 context; + i32 tunnel_index; +}; + +/** \brief mpls gre 
tunnel operational state response + @param tunnel_index - gre tunnel identifier + @param intfc_address - interface ipv4 addr + @param mask_width - interface ipv4 addr mask + @param hw_if_index - interface id + @param l2_only - + @param tunnel_src - tunnel source ipv4 addr + @param tunnel_dst - tunnel destination ipv4 addr + @param outer_fib_index - gre tunnel identifier + @param encap_index - reference to mpls label table + @param nlabels - number of resolved labels + @param labels - resolved labels +*/ +manual_java define mpls_gre_tunnel_details { + u32 context; + u32 tunnel_index; + + u32 intfc_address; + u32 inner_fib_index; + u32 mask_width; + u32 encap_index; + u32 hw_if_index; + u8 l2_only; + u32 tunnel_src; + u32 tunnel_dst; + u32 outer_fib_index; + u32 nlabels; + u32 labels[0]; +}; + +/** \brief Dump mpls eth tunnel table + @param client_index - opaque cookie to identify the sender + @param tunnel_index - eth tunnel identifier or -1 in case of all tunnels +*/ +define mpls_eth_tunnel_dump { + u32 client_index; + u32 context; + i32 tunnel_index; +}; + +/** \brief mpls eth tunnel operational state response + @param tunnel_index - eth tunnel identifier + @param intfc_address - interface ipv4 addr + @param mask_width - interface ipv4 addr mask + @param hw_if_index - interface id + @param l2_only - + @param tunnel_dst_mac - + @param tx_sw_if_index - + @param encap_index - reference to mpls label table + @param nlabels - number of resolved labels + @param labels - resolved labels +*/ +manual_java define mpls_eth_tunnel_details { + u32 context; + u32 tunnel_index; + + u32 intfc_address; + u32 inner_fib_index; + u32 mask_width; + u32 encap_index; + u32 hw_if_index; + u8 l2_only; + u8 tunnel_dst_mac[6]; + u32 tx_sw_if_index; + u32 nlabels; + u32 labels[0]; +}; + +/** \brief Dump mpls fib table + @param client_index - opaque cookie to identify the sender + @param fib_index - mpls fib entry identifier or -1 in case of all entries +*/ +define mpls_fib_encap_dump { + u32 client_index; + u32 context; +}; + +/** \brief mpls fib encap table response + @param fib_index - fib table id + @param dest - destination ipv4 addr + @param s_bit - + @param entry_index - reference to mpls label table + @param nlabels - number of resolved labels + @param labels - resolved labels +*/ +manual_java define mpls_fib_encap_details { + u32 context; + + u32 fib_index; + u32 entry_index; + u32 dest; + u32 s_bit; + u32 nlabels; + u32 labels[0]; +}; + +/** \brief Dump mpls fib decap table + @param client_index - opaque cookie to identify the sender + @param fib_index - mpls fib entry identifier or -1 in case of all entries +*/ +define mpls_fib_decap_dump { + u32 client_index; + u32 context; +}; + +/** \brief mpls fib decap table response + @param fib_index - fib table id + @param entry_index - reference to mpls label table + @param dest - destination ipv4 addr + @param s_bit - + @param label - mpls labels + @param rx_table_id - rx fib id + @param tx_table_id - tx fib id + @param swif_tag - +*/ +manual_java define mpls_fib_decap_details { + u32 context; + + u32 fib_index; + u32 entry_index; + u32 dest; + u32 s_bit; + u32 label; + u32 rx_table_id; + u32 tx_table_id; + u8 swif_tag[8]; +}; |
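
The *_details replies defined above carry their label stack as a variable-length trailing array (u32 nlabels; u32 labels[0];), with every u32 field in network byte order. The helper below is a hypothetical client-side sketch, not part of this patch, showing how such a reply can be walked; it mirrors the ntohl() handling of the VAT handlers and assumes only the generated vl_api_mpls_fib_encap_details_t type plus the clib fformat/format_ip4_address routines already used above.

/* Hypothetical client-side helper (not part of this patch): print one
 * mpls_fib_encap_details reply. All u32 fields arrive in network byte
 * order, so each is converted with ntohl() before use; the label stack
 * is a variable-length array whose length is given by nlabels. */
static void
print_mpls_fib_encap_details (vl_api_mpls_fib_encap_details_t * mp)
{
    u32 i;
    u32 nlabels = ntohl (mp->nlabels);

    fformat (stdout, "fib %u, dest %U, labels:",
             ntohl (mp->fib_index), format_ip4_address, &mp->dest);
    for (i = 0; i < nlabels; i++)
        fformat (stdout, " %u", ntohl (mp->labels[i]));
    fformat (stdout, "\n");
}
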