diff options
author | Rune E. Jensen <runeerle@wgtwo.com> | 2022-11-22 10:35:03 +0100 |
---|---|---|
committer | Ole Trøan <otroan@employees.org> | 2023-11-10 14:39:39 +0000 |
commit | f9ab6985d44651b3f92490829e8fad5bac0ceec2 (patch) | |
tree | 3c4c556d6fe52cebb79251f82b16f572e086a9b1 | |
parent | 5cc67aacf01e8c010ec64a56930f854bcd5d7669 (diff) |
gtpu: support non-G-PDU packets and PDU Session
Updated the gtpu plugin code to support the PDU Session user plane protocol, required for 5G, as
specified in 3GPP TS 38.415 version 17.0.0. This enables some initial support of 5G gNodeB's with
the gtpu plugin.
New features:
- Basic support for the GTP-U Extension Header Flag.
Packets with one extension can now be decapsulated.
This enables basic support of the PDU Session user plane protocol (3GPP TS 38.415 version 17.0.0).
New tunnels can be created with a PDU enable flag and a 6-bit QoS Flow Identifier (QFI).
With this, encapsulated packets will have the PDU Session extension header, and the QFI set.
- Ability to forward GTP-U packets that are not handled by the plugin directly.
Only GTP-U packets with a message type of 255 (G-PDU) are handled directly.
However, 3GPP TS 29.281 defines several other message types like echo and error indication.
A new feature is added to optionally forward unknown or unsupported packets to a new IP address.
This works separately for unknown GTP-U message types, unknown TEIDs, and packets with an unknown
GTP-U header.
This allows both echo and error indications from a 5G gNodeB to be handled by a different system
outside VPP.
- Simple way to get metrics for active tunnels and on tunnel close.
In 5G, session/tunnel lifetimes are often short, and tunnels are created and deleted frequently.
The normal API becomes too slow and inaccurate when too many tunnels are created and deleted
every second.
Improvements:
- A clean ground structure to handle multiple message types in the future.
The code path for G-PDU packets is optimized for performance, representing the typical case.
Unsupported GTP-U packets enter a slow path that decodes the nature of the error.
This presents an easy hook to handle other message types in the future.
- Improved error reporting
When using traces, there are more details in the tunnel descriptions.
- Updated the API with several enums.
Fixes:
- gtpu0->length field in IPv6 was computed with IPv4 header lengths in the encapsulation code.
- vec_set_len (t->rewrite, ...) size was computed with the IPv4 header size also for IPv6 tunnels.
Issues:
- This PR does not enable full support of the 3GPP specification.
In particular it only supports a single QoS/QFI flow for each tunnel.
It ignores all incoming extension header flags.
- API functions might change again when/if more support of the 3GPP TS 38.415 spec is added.
Note that I have bumped the API version to 2.1.0 as it seems to be the correct approach based on
my API changes.
Type: feature
Signed-off-by: Rune E. Jensen <runeerle@wgtwo.com>
Change-Id: I91cd2b31f2561f1b3fb1e46c4c34a5a3c71b4625
-rw-r--r-- | src/plugins/gtpu/gtpu.api | 196 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu.c | 401 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu.h | 108 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu_api.c | 246 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu_decap.c | 1541 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu_encap.c | 218 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu_error.def | 2 | ||||
-rw-r--r-- | src/plugins/gtpu/gtpu_test.c | 366 |
8 files changed, 2551 insertions, 527 deletions
diff --git a/src/plugins/gtpu/gtpu.api b/src/plugins/gtpu/gtpu.api index ec4933af197..7c5c137a840 100644 --- a/src/plugins/gtpu/gtpu.api +++ b/src/plugins/gtpu/gtpu.api @@ -13,10 +13,34 @@ * limitations under the License. */ -option version = "2.0.1"; +option version = "2.1.0"; import "vnet/interface_types.api"; import "vnet/ip/ip_types.api"; +enum gtpu_forwarding_type +{ + GTPU_API_FORWARDING_NONE = 0, + GTPU_API_FORWARDING_BAD_HEADER = 1, + GTPU_API_FORWARDING_UNKNOWN_TEID = 2, + GTPU_API_FORWARDING_UNKNOWN_TYPE = 4, +}; + +enum gtpu_decap_next_type +{ + GTPU_API_DECAP_NEXT_DROP = 0, + GTPU_API_DECAP_NEXT_L2 = 1, + GTPU_API_DECAP_NEXT_IP4 = 2, + GTPU_API_DECAP_NEXT_IP6 = 3, +}; + +typedef sw_if_counters +{ + u64 packets_rx; + u64 packets_tx; + u64 bytes_rx; + u64 bytes_tx; +}; + /** \brief Create or delete a GTPU tunnel @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -56,6 +80,53 @@ define gtpu_add_del_tunnel_reply vl_api_interface_index_t sw_if_index; }; +/** \brief Create or delete a GTPU tunnel + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param src_address - GTPU tunnel's source address. + @param dst_address - GTPU tunnel's destination address. 
+ @param mcast_sw_if_index - version, O-bit and C-bit (see nsh_packet.h) + @param encap_vrf_id - fib identifier used for outgoing encapsulated packets + @param decap_next_index - the index of the next node if success + @param teid - Local (rx) Tunnel Endpoint Identifier + @param tteid - Remote (tx) Tunnel Endpoint Identifier + @param pdu_extension - add PDU session container extension to each packet + @param qfi - the QFI to set in the PDU session container, 6 bits only +*/ +define gtpu_add_del_tunnel_v2 +{ + u32 client_index; + u32 context; + bool is_add; + vl_api_address_t src_address; + vl_api_address_t dst_address; + vl_api_interface_index_t mcast_sw_if_index; + u32 encap_vrf_id; + vl_api_gtpu_decap_next_type_t decap_next_index; + u32 teid; + u32 tteid; + bool pdu_extension; + u8 qfi; + option vat_help = "src <ip-addr> {dst <ip-addr> | group <mcast-ip-addr> {<intfc> | mcast_sw_if_index <nn>}} teid <nn> [tteid <nn>] [encap-vrf-id <nn>] [decap-next <l2|nn>] [qfi <nn>] [del]"; + option in_progress; +}; + +/** \brief reply for set or delete an GTPU tunnel + @param context - sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - software index of the interface + @param counters - Number of packets/bytes that is sent/received via this tunnel. Inaccurate (with in flight packets), sum for the entire set of per-thread counters. Zero for new tunnels. 
+*/ +define gtpu_add_del_tunnel_v2_reply +{ + u32 context; + i32 retval; + vl_api_interface_index_t sw_if_index; + vl_api_sw_if_counters_t counters; + option in_progress; +}; + /** \brief Update GTPU tunnel TX TEID @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -112,6 +183,56 @@ define gtpu_tunnel_details u32 tteid; }; + +/** \brief Dump GTPU tunnel + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - software index of the interface +*/ +define gtpu_tunnel_v2_dump +{ + u32 client_index; + u32 context; + vl_api_interface_index_t sw_if_index; + option vat_help = "[<intfc> | sw_if_index <nn>]"; + option in_progress; +}; + +/** \brief dump details of an GTPU tunnel + @param context - sender context, to match reply w/ request + @param sw_if_index - software index of the interface + @param src_address - GTPU tunnel's source address. + @param dst_address - GTPU tunnel's destination address. + @param mcast_sw_if_index - version, O-bit and C-bit (see nsh_packet.h) + @param encap_vrf_id - fib identifier used for outgoing encapsulated packets + @param decap_next_index - the index of the next node if success + @param teid - Local (rx) Tunnel Endpoint Identifier + @param tteid - Remote (tx) Tunnel Endpoint Identifier + @param pdu_extension - add PDU session container extension to each packet + @param qfi - the QFI to set in the PDU session container, 6 bits only + @param is_forwarding - tunnel used for forwarding packets + @param forwarding_type - the type of packets forwarded + @param counters - Number of packets/bytes that is sent/received via this tunnel. Inaccurate (with in flight packets), sum for the entire set of per-thread counters. 
+*/ +define gtpu_tunnel_v2_details +{ + u32 context; + vl_api_interface_index_t sw_if_index; + vl_api_address_t src_address; + vl_api_address_t dst_address; + vl_api_interface_index_t mcast_sw_if_index; + u32 encap_vrf_id; + vl_api_gtpu_decap_next_type_t decap_next_index; + u32 teid; + u32 tteid; + bool pdu_extension; + u8 qfi; + bool is_forwarding; + vl_api_gtpu_forwarding_type_t forwarding_type; + vl_api_sw_if_counters_t counters; + option in_progress; +}; + /** \brief Interface set gtpu-bypass request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -146,6 +267,79 @@ autoreply define gtpu_offload_rx option vat_help = "hw <intfc> rx <tunnel-name> [del]"; }; +/** \brief Set gtpu-forward request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_add - add address if non-zero, else delete + @param forwarding_type - forward filter (unknown teid, unknown message type or unknown header) + @param dst_address - forward destination address. + @param encap_vrf_id - fib identifier used for outgoing packets + @param decap_next_index - the index of the next node if success +*/ +define gtpu_add_del_forward +{ + u32 client_index; + u32 context; + bool is_add; + vl_api_address_t dst_address; + vl_api_gtpu_forwarding_type_t forwarding_type; + u32 encap_vrf_id; + vl_api_gtpu_decap_next_type_t decap_next_index; + option vat_help = "dst <ip-addr> {bad-header|unknown-teid|unknown-type} [decap-next <l2|nn>] [del]"; + option in_progress; +}; + +/** \brief reply for set or delete GTPU forwarding + @param context - sender context, to match reply w/ request + @param retval - return code + @param sw_if_index - software index of the interface +*/ +define gtpu_add_del_forward_reply +{ + u32 context; + i32 retval; + vl_api_interface_index_t sw_if_index; + option in_progress; +}; + +/** \brief Get list of metrics, use for bulk transfer. 
+ @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index_start - software index of the first interface to return data on. + @param capacity - max number of interfaces returned. +*/ +define gtpu_get_transfer_counts +{ + u32 client_index; + u32 context; + vl_api_interface_index_t sw_if_index_start; + u32 capacity; + //option vat_help = "start_index <sw_if_index> count <nn>"; + option in_progress; +}; + +/** \brief reply for set or delete GTPU forwarding + @param context - sender context, to match reply w/ request + @param retval - return code + @param count - number of tunnel counters returned, sequential starting at sw_if_index_start. + @param tunnels - Number of packets/bytes that is sent/received via this tunnel. Inaccurate (with in flight packets), sum for the entire set of per-thread counters. +*/ +typedef tunnel_metrics +{ + vl_api_interface_index_t sw_if_index; + u32 reserved; + vl_api_sw_if_counters_t counters; +}; + +define gtpu_get_transfer_counts_reply +{ + u32 context; + i32 retval; + u32 count; + vl_api_tunnel_metrics_t tunnels[count]; + option in_progress; +}; + /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c index d3a2f05dd18..1307794b9e5 100644 --- a/src/plugins/gtpu/gtpu.c +++ b/src/plugins/gtpu/gtpu.c @@ -56,8 +56,13 @@ u8 * format_gtpu_encap_trace (u8 * s, va_list * args) gtpu_encap_trace_t * t = va_arg (*args, gtpu_encap_trace_t *); - s = format (s, "GTPU encap to gtpu_tunnel%d tteid %d", - t->tunnel_index, t->tteid); + s = format (s, "GTPU encap to gtpu_tunnel%d tteid %u ", t->tunnel_index, + t->tteid); + + if (t->pdu_extension) + s = format (s, "pdu-extension qfi %d ", t->qfi); + else + s = format (s, "no-pdu-extension "); return s; } @@ -95,16 +100,37 @@ format_gtpu_tunnel (u8 * s, va_list * args) is_ipv6 ? 
im6->fibs[t->encap_fib_index].ft_table_id : im4->fibs[t->encap_fib_index].ft_table_id; - s = format (s, "[%d] src %U dst %U teid %d tteid %d " + s = format (s, + "[%d] src %U dst %U teid %u tteid %u " "encap-vrf-id %d sw-if-idx %d ", - t - ngm->tunnels, - format_ip46_address, &t->src, IP46_TYPE_ANY, - format_ip46_address, &t->dst, IP46_TYPE_ANY, - t->teid, t->tteid, encap_vrf_id, t->sw_if_index); + t - ngm->tunnels, format_ip46_address, &t->src, IP46_TYPE_ANY, + format_ip46_address, &t->dst, IP46_TYPE_ANY, t->teid, t->tteid, + encap_vrf_id, t->sw_if_index); s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index); s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index); + if (t->is_forwarding) + { + switch (t->forwarding_type) + { + case GTPU_FORWARD_BAD_HEADER: + s = format (s, "forwarding bad-header "); + break; + case GTPU_FORWARD_UNKNOWN_TEID: + s = format (s, "forwarding unknown-teid "); + break; + case GTPU_FORWARD_UNKNOWN_TYPE: + s = format (s, "forwarding unknown-type "); + break; + } + return s; + } + if (t->pdu_extension != 0) + s = format (s, "pdu-enabled qfi %d ", t->qfi); + else + s = format (s, "pdu-disabled "); + if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst))) s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index); @@ -224,15 +250,18 @@ const static fib_node_vft_t gtpu_vft = { .fnv_back_walk = gtpu_tunnel_back_walk, }; - -#define foreach_copy_field \ -_(teid) \ -_(tteid) \ -_(mcast_sw_if_index) \ -_(encap_fib_index) \ -_(decap_next_index) \ -_(src) \ -_(dst) +#define foreach_copy_field \ + _ (teid) \ + _ (tteid) \ + _ (mcast_sw_if_index) \ + _ (encap_fib_index) \ + _ (decap_next_index) \ + _ (src) \ + _ (dst) \ + _ (pdu_extension) \ + _ (qfi) \ + _ (is_forwarding) \ + _ (forwarding_type) static void ip_udp_gtpu_rewrite (gtpu_tunnel_t * t, bool is_ip6) @@ -251,12 +280,15 @@ ip_udp_gtpu_rewrite (gtpu_tunnel_t * t, bool is_ip6) udp_header_t *udp; gtpu_header_t *gtpu; + gtpu_ext_with_pdu_session_header_t *gtpu_ext_pdu; + 
i64 length_adjustment = 0; /* Fixed portion of the (outer) ip header */ if (!is_ip6) { ip4_header_t *ip = &r.h4->ip4; udp = &r.h4->udp; gtpu = &r.h4->gtpu; + gtpu_ext_pdu = &r.h4->gtpu_ext; ip->ip_version_and_header_length = 0x45; ip->ttl = 254; ip->protocol = IP_PROTOCOL_UDP; @@ -272,6 +304,7 @@ ip_udp_gtpu_rewrite (gtpu_tunnel_t * t, bool is_ip6) ip6_header_t *ip = &r.h6->ip6; udp = &r.h6->udp; gtpu = &r.h6->gtpu; + gtpu_ext_pdu = &r.h6->gtpu_ext; ip->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (6 << 28); ip->hop_limit = 255; @@ -290,9 +323,27 @@ ip_udp_gtpu_rewrite (gtpu_tunnel_t * t, bool is_ip6) gtpu->type = GTPU_TYPE_GTPU; gtpu->teid = clib_host_to_net_u32 (t->tteid); + if (t->pdu_extension) + { + gtpu->ver_flags = GTPU_V1_VER | GTPU_PT_GTP | GTPU_E_BIT; + gtpu->next_ext_type = GTPU_EXT_HDR_PDU_SESSION_CONTAINER; + gtpu_ext_pdu->len = 1; + gtpu_ext_pdu->pdu.oct0 = GTPU_PDU_DL_SESSION_TYPE; + gtpu_ext_pdu->pdu.oct1 = t->qfi; + gtpu_ext_pdu->next_header = 0; + } + else + { + // Remove the size of the PDU session header and the optional fields + length_adjustment = -sizeof (gtpu_ext_with_pdu_session_header_t) - 4; + } + t->rewrite = r.rw; - /* Now only support 8-byte gtpu header. TBD */ - vec_set_len (t->rewrite, sizeof (ip4_gtpu_header_t) - 4); + /* Now only support 8-byte gtpu header or 12+4-byte header. 
TBD */ + if (!is_ip6) + vec_set_len (t->rewrite, sizeof (ip4_gtpu_header_t) + length_adjustment); + else + vec_set_len (t->rewrite, sizeof (ip6_gtpu_header_t) + length_adjustment); return; } @@ -349,6 +400,139 @@ mcast_shared_remove (ip46_address_t * dst) hash_unset_mem_free (>pu_main.mcast_shared, dst); } +int +vnet_gtpu_add_del_forwarding (vnet_gtpu_add_mod_del_tunnel_args_t *a, + u32 *sw_if_indexp) +{ + gtpu_main_t *gtm = >pu_main; + bool is_add; + u32 current_index_value, current_index_value_ipv6; + u32 address_tabel_ipv4; + ip6_address_t address_tabel_ipv6; + u32 sw_if_index = ~0; + bool is_ip6 = !ip46_address_is_ip4 (&a->dst); + int rv; + /* Check for errors */ + if (!a->is_forwarding) + { + return VNET_API_ERROR_INVALID_ARGUMENT; + } + + switch (a->opn) + { + case GTPU_ADD_TUNNEL: + is_add = 1; + break; + case GTPU_DEL_TUNNEL: + is_add = 0; + break; + default: + return VNET_API_ERROR_INVALID_ARGUMENT; + } + + /* Check if the operation is valid, and get the current state if it is. + * Handling multiple flags at once is not supported yet. 
*/ + switch (a->forwarding_type) + { + case GTPU_FORWARD_BAD_HEADER: + current_index_value = gtm->bad_header_forward_tunnel_index_ipv4; + current_index_value_ipv6 = gtm->bad_header_forward_tunnel_index_ipv6; + address_tabel_ipv4 = GTPU_FORWARD_BAD_HEADER_ADDRESS_IPV4; + /* ipv6 is TBD */ + ip6_address_t address_tabel_ipv6_ = GTPU_FORWARD_BAD_HEADER_ADDRESS_IPV6; + address_tabel_ipv6 = address_tabel_ipv6_; + break; + case GTPU_FORWARD_UNKNOWN_TEID: + current_index_value = gtm->unknown_teid_forward_tunnel_index_ipv4; + current_index_value_ipv6 = gtm->unknown_teid_forward_tunnel_index_ipv6; + address_tabel_ipv4 = GTPU_FORWARD_UNKNOWN_TEID_ADDRESS_IPV4; + ip6_address_t address_tabel_ipv6__ = + GTPU_FORWARD_UNKNOWN_TEID_ADDRESS_IPV6; + address_tabel_ipv6 = address_tabel_ipv6__; + break; + case GTPU_FORWARD_UNKNOWN_TYPE: + current_index_value = gtm->unknown_type_forward_tunnel_index_ipv4; + current_index_value_ipv6 = gtm->unknown_type_forward_tunnel_index_ipv6; + address_tabel_ipv4 = GTPU_FORWARD_UNKNOWN_TYPE_ADDRESS_IPV4; + ip6_address_t address_tabel_ipv6___ = + GTPU_FORWARD_UNKNOWN_TYPE_ADDRESS_IPV6; + address_tabel_ipv6 = address_tabel_ipv6___; + break; + default: + return VNET_API_ERROR_INVALID_ARGUMENT; + } + + if (is_ip6) + current_index_value = current_index_value_ipv6; + + /* Check if the existing forwarding rule state conflicts with this operation + */ + if ((is_add) && (current_index_value != ~0)) + { + return VNET_API_ERROR_TUNNEL_EXIST; + } + if (!is_add) + { + if (current_index_value == ~0) + return VNET_API_ERROR_NO_SUCH_ENTRY; + /* Clear the tunnel index before deleting the tunnel itself */ + switch (a->forwarding_type) + { + case GTPU_FORWARD_BAD_HEADER: + gtm->bad_header_forward_tunnel_index_ipv4 = ~0; + break; + case GTPU_FORWARD_UNKNOWN_TEID: + gtm->unknown_teid_forward_tunnel_index_ipv4 = ~0; + break; + case GTPU_FORWARD_UNKNOWN_TYPE: + gtm->unknown_type_forward_tunnel_index_ipv4 = ~0; + break; + } + } + + /* src is the tunnel lookup key, so it is 
fixed. + * dst is used for the new target */ + a->src = a->dst; + if (is_ip6) + a->dst.ip6 = address_tabel_ipv6; + else + a->dst.ip4.as_u32 = address_tabel_ipv4; + rv = vnet_gtpu_add_mod_del_tunnel (a, &sw_if_index); + + // Forward only if not nil + if (sw_if_indexp) + *sw_if_indexp = sw_if_index; + + if (rv != 0) + return rv; + + /* Update the forwarding tunnel index */ + u32 tunnel_index = is_add ? vnet_gtpu_get_tunnel_index (sw_if_index) : ~0; + switch (a->forwarding_type) + { + case GTPU_FORWARD_BAD_HEADER: + if (is_ip6) + gtm->bad_header_forward_tunnel_index_ipv6 = tunnel_index; + else + gtm->bad_header_forward_tunnel_index_ipv4 = tunnel_index; + + break; + case GTPU_FORWARD_UNKNOWN_TEID: + if (is_ip6) + gtm->unknown_teid_forward_tunnel_index_ipv6 = tunnel_index; + else + gtm->unknown_teid_forward_tunnel_index_ipv4 = tunnel_index; + break; + case GTPU_FORWARD_UNKNOWN_TYPE: + if (is_ip6) + gtm->unknown_type_forward_tunnel_index_ipv6 = tunnel_index; + else + gtm->unknown_type_forward_tunnel_index_ipv4 = tunnel_index; + break; + } + return 0; +} + int vnet_gtpu_add_mod_del_tunnel (vnet_gtpu_add_mod_del_tunnel_args_t * a, u32 * sw_if_indexp) { @@ -635,6 +819,22 @@ int vnet_gtpu_add_mod_del_tunnel return 0; } +int +get_combined_counters (u32 sw_if_index, vlib_counter_t *result_rx, + vlib_counter_t *result_tx) +{ + gtpu_main_t *gtm = >pu_main; + vnet_main_t *vnm = gtm->vnet_main; + vnet_interface_main_t *im = &vnm->interface_main; + vlib_get_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_RX, + sw_if_index, result_rx); + vlib_get_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_TX, + sw_if_index, result_tx); + return 0; +} + static uword get_decap_next_for_node (u32 node_index, u32 ipv4_set) { @@ -690,6 +890,11 @@ gtpu_add_del_tunnel_command_fn (vlib_main_t * vm, u32 decap_next_index = GTPU_INPUT_NEXT_L2_INPUT; u32 teid = 0, tteid = 0; u32 tmp; + /* PDU is disabled by default */ + u8 pdu_extension = 0; + u32 qfi = ~0; 
+ u8 is_forwarding = 0; + u8 forwarding_type = 0; int rv; vnet_gtpu_add_mod_del_tunnel_args_t _a, *a = &_a; u32 tunnel_sw_if_index; @@ -768,6 +973,8 @@ gtpu_add_del_tunnel_command_fn (vlib_main_t * vm, ; else if (unformat (line_input, "upd-tteid %d", &tteid)) opn = GTPU_UPD_TTEID; + else if (unformat (line_input, "qfi %d", &qfi)) + pdu_extension = 1; else { error = clib_error_return (0, "parse error: '%U'", @@ -829,7 +1036,11 @@ gtpu_add_del_tunnel_command_fn (vlib_main_t * vm, error = clib_error_return (0, "next node not found"); goto done; } - + if (pdu_extension == 1 && qfi > 31) + { + error = clib_error_return (0, "qfi max value is 31"); + goto done; + } clib_memset (a, 0, sizeof (*a)); a->opn = opn; @@ -899,10 +1110,10 @@ done: VLIB_CLI_COMMAND (create_gtpu_tunnel_command, static) = { .path = "create gtpu tunnel", .short_help = - "create gtpu tunnel src <local-tep-addr>" - " {dst <remote-tep-addr>|group <mcast-addr> <intf-name>}" - " teid <nn> [tteid <nn>] [encap-vrf-id <nn>]" - " [decap-next [l2|ip4|ip6|node <name>]] [del | upd-tteid <nn>]", + "create gtpu tunnel src <local-tep-addr>" + " {dst <remote-tep-addr>|group <mcast-addr> <intf-name>}" + " teid <nn> [tteid <nn>] [encap-vrf-id <nn>]" + " [decap-next [l2|ip4|ip6|node <name>]] [qfi <nn>] [del | upd-tteid <nn>]", .function = gtpu_add_del_tunnel_command_fn, }; /* *INDENT-ON* */ @@ -932,7 +1143,8 @@ show_gtpu_tunnel_command_fn (vlib_main_t * vm, * @cliexpar * Example of how to display the GTPU Tunnel entries: * @cliexstart{show gtpu tunnel} - * [0] src 10.0.3.1 dst 10.0.3.3 teid 13 tx-teid 55 encap_fib_index 0 sw_if_index 5 decap_next l2 + * [0] src 10.0.3.1 dst 10.0.3.3 teid 13 tx-teid 55 encap_fib_index 0 + sw_if_index 5 decap_next l2 pdu-disabled * @cliexend ?*/ /* *INDENT-OFF* */ @@ -1242,6 +1454,139 @@ VLIB_CLI_COMMAND (gtpu_offload_command, static) = { }; /* *INDENT-ON* */ +static clib_error_t * +gtpu_forward_command_fn (vlib_main_t *vm, unformat_input_t *input, + vlib_cli_command_t *cmd) +{ + 
unformat_input_t _line_input, *line_input = &_line_input; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + u32 tunnel_sw_if_index; + clib_error_t *error = NULL; + + u32 decap_next_index = GTPU_INPUT_NEXT_L2_INPUT; + + int is_add = 1; + u8 dst_set = 0; + u8 ipv4_set = 0; + u8 ipv6_set = 0; + ip46_address_t src, dst; + u32 encap_fib_index = 0; + u32 mcast_sw_if_index = ~0; + u32 teid = 0, tteid = 0; + u32 tmp; + /* PDU is disabled by default */ + u8 pdu_extension = 0; + u32 qfi = ~0; + u8 is_forwarding = 1; + u8 forwarding_type = 0; + int rv; + vnet_gtpu_add_mod_del_tunnel_args_t _a, *a = &_a; + + /* Cant "universally zero init" (={0}) due to GCC bug 53119 */ + clib_memset (&src, 0, sizeof src); + clib_memset (&dst, 0, sizeof dst); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "dst %U", unformat_ip4_address, &dst.ip4)) + { + dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "dst %U", unformat_ip6_address, &dst.ip6)) + { + dst_set = 1; + ipv6_set = 1; + } + else if (unformat (line_input, "decap-next %U", unformat_decap_next, + &decap_next_index, ipv4_set)) + ; + else if (unformat (line_input, "encap-vrf-id %d", &tmp)) + { + encap_fib_index = fib_table_find (fib_ip_proto (ipv6_set), tmp); + if (encap_fib_index == ~0) + { + error = + clib_error_return (0, "nonexistent encap-vrf-id %d", tmp); + goto done; + } + } + else if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "bad-header")) + forwarding_type |= GTPU_FORWARD_BAD_HEADER; + else if (unformat (line_input, "unknown-teid")) + forwarding_type |= GTPU_FORWARD_UNKNOWN_TEID; + else if (unformat (line_input, "unknown-type")) + forwarding_type |= GTPU_FORWARD_UNKNOWN_TYPE; + else + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } + } + + if (!dst_set) + { + error = clib_error_return (0, "dst must be 
set to a valid IP address"); + goto done; + } + + a->opn = is_add ? GTPU_ADD_TUNNEL : GTPU_DEL_TUNNEL; +#define _(x) a->x = x; + foreach_copy_field; +#undef _ + + rv = vnet_gtpu_add_del_forwarding (a, &tunnel_sw_if_index); + + switch (rv) + { + case 0: + if (is_add) + vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, + vnet_get_main (), tunnel_sw_if_index); + break; + + case VNET_API_ERROR_TUNNEL_EXIST: + error = clib_error_return (0, "tunnel already exists..."); + goto done; + + case VNET_API_ERROR_NO_SUCH_ENTRY: + error = clib_error_return (0, "tunnel does not exist..."); + goto done; + + case VNET_API_ERROR_INVALID_ARGUMENT: + error = + clib_error_return (0, "one and only one of unknown-teid, unknown-type " + "or bad-header must be specified"); + goto done; + + default: + error = + clib_error_return (0, "vnet_gtpu_add_del_tunnel returned %d", rv); + goto done; + } + +done: + unformat_free (line_input); + + return error; +} + +VLIB_CLI_COMMAND (gtpu_forward_command, static) = { + .path = "create gtpu forward", + .short_help = + "create gtpu forward dst <local-tep-addr> " + "{unknown-teid|unknown-type|bad-header} " + "[decap-next [l2|ip4|ip6|node <name>]] [encap-vrf-id <nn>] [del]", + .function = gtpu_forward_command_fn, +}; + clib_error_t * gtpu_init (vlib_main_t * vm) { @@ -1264,6 +1609,14 @@ gtpu_init (vlib_main_t * vm) gtm->fib_node_type = fib_node_register_new_type ("gtpu", >pu_vft); + /* Clear forward tunnels */ + gtm->bad_header_forward_tunnel_index_ipv4 = ~0; + gtm->unknown_teid_forward_tunnel_index_ipv4 = ~0; + gtm->unknown_type_forward_tunnel_index_ipv4 = ~0; + gtm->bad_header_forward_tunnel_index_ipv6 = ~0; + gtm->unknown_teid_forward_tunnel_index_ipv6 = ~0; + gtm->unknown_type_forward_tunnel_index_ipv6 = ~0; + return 0; } diff --git a/src/plugins/gtpu/gtpu.h b/src/plugins/gtpu/gtpu.h index 72d09232001..0c224ebbfe3 100644 --- a/src/plugins/gtpu/gtpu.h +++ b/src/plugins/gtpu/gtpu.h @@ -53,21 +53,56 @@ * 12 Next Extension Header Type3) 4) **/ 
-typedef struct -{ +typedef CLIB_PACKED (struct { u8 ver_flags; u8 type; u16 length; /* length in octets of the data following the fixed part of the header */ u32 teid; + /* The following fields exists if and only if one or more of E, S or PN + * are 1. */ u16 sequence; u8 pdu_number; u8 next_ext_type; -} gtpu_header_t; +}) gtpu_header_t; -#define GTPU_V1_HDR_LEN 8 +typedef CLIB_PACKED (struct { + u8 type; + u8 len; + u16 pad; +}) gtpu_ext_header_t; + +/** + * DL PDU SESSION INFORMATION (PDU Type 0): + * (3GPP TS 38.415) + * Bits + * Octets 8 7 6 5 4 3 2 1 + * 1 type qmp snp spare + * 2 ppp rqi qos_fi + * + * UL PDU SESSION INFORMATION (PDU Type 1): + * Bits + * Octets 8 7 6 5 4 3 2 1 + * 1 type qmp DL d. UL d. snp + * 2 n3/n9 delay new IE qos_fi + **/ +typedef CLIB_PACKED (struct { + u8 oct0; + u8 oct1; + // Extensions are supported +}) pdu_session_container_t; + +STATIC_ASSERT_SIZEOF (pdu_session_container_t, 2); +typedef CLIB_PACKED (struct { + u8 len; + pdu_session_container_t pdu; + u8 next_header; +}) gtpu_ext_with_pdu_session_header_t; + +#define GTPU_V1_HDR_LEN 8 #define GTPU_VER_MASK (7<<5) #define GTPU_PT_BIT (1<<4) +#define GTPU_RES_BIT (1 << 3) #define GTPU_E_BIT (1<<2) #define GTPU_S_BIT (1<<1) #define GTPU_PN_BIT (1<<0) @@ -78,12 +113,42 @@ typedef struct #define GTPU_PT_GTP (1<<4) #define GTPU_TYPE_GTPU 255 +#define GTPU_EXT_HDR_PDU_SESSION_CONTAINER 133 +#define GTPU_NO_MORE_EXT_HDR 0 +#define GTPU_PDU_DL_SESSION_TYPE 0 +#define GTPU_PDU_UL_SESSION_TYPE (1 << 4) + +#define GTPU_FORWARD_BAD_HEADER (1 << 0) +#define GTPU_FORWARD_UNKNOWN_TEID (1 << 1) +#define GTPU_FORWARD_UNKNOWN_TYPE (1 << 2) + +/* the ipv4 addresses used for the forwarding tunnels. 127.0.0.127 - .129. */ +#define GTPU_FORWARD_BAD_HEADER_ADDRESS_IPV4 0x7f00007fu +#define GTPU_FORWARD_UNKNOWN_TEID_ADDRESS_IPV4 0x8000007fu +#define GTPU_FORWARD_UNKNOWN_TYPE_ADDRESS_IPV4 0x8100007fu + +/* the ipv6 addresses used for the forwarding tunnels. 
+ * 2001:db8:ffff:ffff:ffff:ffff:ffff:fffd - + * 2001:db8:ffff:ffff:ffff:ffff:ffff:ffff*/ +#define GTPU_FORWARD_BAD_HEADER_ADDRESS_IPV6 \ + { \ + .as_u64[0] = 0xffffffffb80d0120ull, .as_u64[1] = 0xfdffffffffffffffull \ + } +#define GTPU_FORWARD_UNKNOWN_TEID_ADDRESS_IPV6 \ + { \ + .as_u64[0] = 0xffffffffb80d0120ull, .as_u64[1] = 0xfeffffffffffffffull \ + } +#define GTPU_FORWARD_UNKNOWN_TYPE_ADDRESS_IPV6 \ + { \ + .as_u64[0] = 0xffffffffb80d0120ull, .as_u64[1] = 0xffffffffffffffffull \ + } /* *INDENT-OFF* */ typedef CLIB_PACKED(struct { ip4_header_t ip4; /* 20 bytes */ udp_header_t udp; /* 8 bytes */ gtpu_header_t gtpu; /* 12 bytes */ + gtpu_ext_with_pdu_session_header_t gtpu_ext; /* 4 bytes */ }) ip4_gtpu_header_t; /* *INDENT-ON* */ @@ -92,7 +157,8 @@ typedef CLIB_PACKED(struct { ip6_header_t ip6; /* 40 bytes */ udp_header_t udp; /* 8 bytes */ - gtpu_header_t gtpu; /* 8 bytes */ + gtpu_header_t gtpu; /* 12 bytes */ + gtpu_ext_with_pdu_session_header_t gtpu_ext; /* 4 bytes */ }) ip6_gtpu_header_t; /* *INDENT-ON* */ @@ -157,6 +223,14 @@ typedef struct u32 sw_if_index; u32 hw_if_index; + /* PDU session container extension enable/disable */ + u8 pdu_extension; + u8 qfi; + + /* The tunnel is used for forwarding */ + u8 is_forwarding; + u8 forwarding_type; + /** * Linkage into the FIB object graph */ @@ -232,6 +306,19 @@ typedef struct /* API message ID base */ u16 msg_id_base; + /* Handle GTP packets of unknown type like echo and error indication, + * unknown teid or bad version/header. + * All packets will be forwarded to a new IP address, + * so that they can be processes outside vpp. + * If not set then packets are dropped. + * One of more indexes can be unused (~0). 
*/ + u32 bad_header_forward_tunnel_index_ipv4; + u32 unknown_teid_forward_tunnel_index_ipv4; + u32 unknown_type_forward_tunnel_index_ipv4; + u32 bad_header_forward_tunnel_index_ipv6; + u32 unknown_teid_forward_tunnel_index_ipv6; + u32 unknown_type_forward_tunnel_index_ipv6; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; @@ -263,8 +350,15 @@ typedef struct u32 decap_next_index; u32 teid; /* local or rx teid */ u32 tteid; /* remote or tx teid */ + u8 pdu_extension; + u8 qfi; + u8 is_forwarding; + u8 forwarding_type; } vnet_gtpu_add_mod_del_tunnel_args_t; +int vnet_gtpu_add_del_forwarding (vnet_gtpu_add_mod_del_tunnel_args_t *a, + u32 *sw_if_indexp); + int vnet_gtpu_add_mod_del_tunnel (vnet_gtpu_add_mod_del_tunnel_args_t * a, u32 * sw_if_indexp); @@ -272,11 +366,15 @@ typedef struct { u32 tunnel_index; u32 tteid; + u8 pdu_extension; + u8 qfi; } gtpu_encap_trace_t; void vnet_int_gtpu_bypass_mode (u32 sw_if_index, u8 is_ip6, u8 is_enable); u32 vnet_gtpu_get_tunnel_index (u32 sw_if_index); int vnet_gtpu_add_del_rx_flow (u32 hw_if_index, u32 t_imdex, int is_add); +int get_combined_counters (u32 sw_if_index, vlib_counter_t *result_rx, + vlib_counter_t *result_tx); #endif /* included_vnet_gtpu_h */ diff --git a/src/plugins/gtpu/gtpu_api.c b/src/plugins/gtpu/gtpu_api.c index 77432bae4fa..1cc9fab6cd2 100644 --- a/src/plugins/gtpu/gtpu_api.c +++ b/src/plugins/gtpu/gtpu_api.c @@ -124,6 +124,10 @@ static void vl_api_gtpu_add_del_tunnel_t_handler .decap_next_index = ntohl (mp->decap_next_index), .teid = ntohl (mp->teid), .tteid = ntohl (mp->tteid), + .pdu_extension = 0, + .qfi = 0, + .is_forwarding = 0, + .forwarding_type = 0, }; ip_address_decode (&mp->dst_address, &a.dst); ip_address_decode (&mp->src_address, &a.src); @@ -154,12 +158,70 @@ static void vl_api_gtpu_add_del_tunnel_t_handler rv = vnet_gtpu_add_mod_del_tunnel (&a, &sw_if_index); out: - /* *INDENT-OFF* */ REPLY_MACRO2(VL_API_GTPU_ADD_DEL_TUNNEL_REPLY, ({ rmp->sw_if_index = ntohl (sw_if_index); 
})); - /* *INDENT-ON* */ +} + +static void +vl_api_gtpu_add_del_tunnel_v2_t_handler (vl_api_gtpu_add_del_tunnel_v2_t *mp) +{ + vl_api_gtpu_add_del_tunnel_v2_reply_t *rmp; + int rv = 0; + vlib_counter_t result_rx; + vlib_counter_t result_tx; + gtpu_main_t *gtm = >pu_main; + + vnet_gtpu_add_mod_del_tunnel_args_t a = { + .opn = mp->is_add ? GTPU_ADD_TUNNEL : GTPU_DEL_TUNNEL, + .mcast_sw_if_index = ntohl (mp->mcast_sw_if_index), + .decap_next_index = ntohl (mp->decap_next_index), + .teid = ntohl (mp->teid), + .tteid = ntohl (mp->tteid), + .pdu_extension = mp->pdu_extension ? 1 : 0, + .qfi = mp->qfi, + .is_forwarding = 0, + .forwarding_type = 0, + }; + ip_address_decode (&mp->dst_address, &a.dst); + ip_address_decode (&mp->src_address, &a.src); + + u8 is_ipv6 = !ip46_address_is_ip4 (&a.dst); + a.encap_fib_index = + fib_table_find (fib_ip_proto (is_ipv6), ntohl (mp->encap_vrf_id)); + if (a.encap_fib_index == ~0) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + + /* Check src & dst are different */ + if (ip46_address_cmp (&a.dst, &a.src) == 0) + { + rv = VNET_API_ERROR_SAME_SRC_DST; + goto out; + } + if (ip46_address_is_multicast (&a.dst) && + !vnet_sw_if_index_is_api_valid (a.mcast_sw_if_index)) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto out; + } + + u32 sw_if_index = ~0; + rv = vnet_gtpu_add_mod_del_tunnel (&a, &sw_if_index); + get_combined_counters (sw_if_index, &result_rx, &result_tx); + +out: + REPLY_MACRO2 ( + VL_API_GTPU_ADD_DEL_TUNNEL_V2_REPLY, ({ + rmp->sw_if_index = ntohl (sw_if_index); + rmp->counters.packets_rx = clib_net_to_host_u64 (result_rx.packets); + rmp->counters.packets_tx = clib_net_to_host_u64 (result_tx.packets); + rmp->counters.bytes_rx = clib_net_to_host_u64 (result_rx.bytes); + rmp->counters.bytes_tx = clib_net_to_host_u64 (result_tx.bytes); + })); } static void vl_api_gtpu_tunnel_update_tteid_t_handler @@ -242,7 +304,7 @@ vl_api_gtpu_tunnel_dump_t_handler (vl_api_gtpu_tunnel_dump_t * mp) pool_foreach (t, gtm->tunnels) { 
send_gtpu_tunnel_details(t, reg, mp->context); - } + } /* *INDENT-ON* */ } else @@ -257,6 +319,184 @@ vl_api_gtpu_tunnel_dump_t_handler (vl_api_gtpu_tunnel_dump_t * mp) } } +static void +send_gtpu_tunnel_details_v2 (gtpu_tunnel_t *t, vl_api_registration_t *reg, + u32 context) +{ + vl_api_gtpu_tunnel_v2_details_t *rmp; + vlib_counter_t result_rx; + vlib_counter_t result_tx; + gtpu_main_t *gtm = >pu_main; + ip4_main_t *im4 = &ip4_main; + ip6_main_t *im6 = &ip6_main; + u8 is_ipv6 = !ip46_address_is_ip4 (&t->dst); + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + clib_memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_GTPU_TUNNEL_V2_DETAILS + gtm->msg_id_base); + + ip_address_encode (&t->src, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4, + &rmp->src_address); + ip_address_encode (&t->dst, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4, + &rmp->dst_address); + + rmp->encap_vrf_id = is_ipv6 ? + htonl (im6->fibs[t->encap_fib_index].ft_table_id) : + htonl (im4->fibs[t->encap_fib_index].ft_table_id); + rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index); + rmp->teid = htonl (t->teid); + rmp->tteid = htonl (t->tteid); + rmp->decap_next_index = htonl (t->decap_next_index); + rmp->sw_if_index = htonl (t->sw_if_index); + rmp->context = context; + rmp->pdu_extension = t->pdu_extension; + rmp->qfi = t->qfi; + rmp->is_forwarding = t->is_forwarding; + rmp->forwarding_type = htonl (t->forwarding_type); + + get_combined_counters (t->sw_if_index, &result_rx, &result_tx); + rmp->counters.packets_rx = clib_net_to_host_u64 (result_rx.packets); + rmp->counters.packets_tx = clib_net_to_host_u64 (result_tx.packets); + rmp->counters.bytes_rx = clib_net_to_host_u64 (result_rx.bytes); + rmp->counters.bytes_tx = clib_net_to_host_u64 (result_tx.bytes); + + vl_api_send_msg (reg, (u8 *) rmp); +} + +static void +vl_api_gtpu_tunnel_v2_dump_t_handler (vl_api_gtpu_tunnel_v2_dump_t *mp) +{ + vl_api_registration_t *reg; + gtpu_main_t *gtm = >pu_main; + gtpu_tunnel_t *t; + u32 sw_if_index; + + reg = 
vl_api_client_index_to_registration (mp->client_index); + if (!reg) + return; + + sw_if_index = ntohl (mp->sw_if_index); + + if (~0 == sw_if_index) + { + pool_foreach (t, gtm->tunnels) + { + send_gtpu_tunnel_details_v2 (t, reg, mp->context); + } + } + else + { + if ((sw_if_index >= vec_len (gtm->tunnel_index_by_sw_if_index)) || + (~0 == gtm->tunnel_index_by_sw_if_index[sw_if_index])) + { + return; + } + t = >m->tunnels[gtm->tunnel_index_by_sw_if_index[sw_if_index]]; + send_gtpu_tunnel_details_v2 (t, reg, mp->context); + } +} + +static void +vl_api_gtpu_add_del_forward_t_handler (vl_api_gtpu_add_del_forward_t *mp) +{ + vl_api_gtpu_add_del_forward_reply_t *rmp; + int rv = 0; + gtpu_main_t *gtm = >pu_main; + + vnet_gtpu_add_mod_del_tunnel_args_t a = { + .opn = mp->is_add ? GTPU_ADD_TUNNEL : GTPU_DEL_TUNNEL, + .mcast_sw_if_index = 0, + .decap_next_index = ntohl (mp->decap_next_index), + .teid = 0, + .tteid = 0, + .pdu_extension = 0, + .qfi = 0, + .is_forwarding = 1, + .forwarding_type = ntohl (mp->forwarding_type), + }; + ip_address_decode (&mp->dst_address, &a.dst); + /* Will be overwritten later */ + ip_address_decode (&mp->dst_address, &a.src); + + u8 is_ipv6 = !ip46_address_is_ip4 (&a.dst); + a.encap_fib_index = + fib_table_find (fib_ip_proto (is_ipv6), ntohl (mp->encap_vrf_id)); + + if (a.encap_fib_index == ~0) + { + rv = VNET_API_ERROR_NO_SUCH_FIB; + goto out; + } + + if (ip46_address_is_multicast (&a.dst) && + !vnet_sw_if_index_is_api_valid (a.mcast_sw_if_index)) + { + rv = VNET_API_ERROR_INVALID_SW_IF_INDEX; + goto out; + } + + u32 sw_if_index = ~0; + rv = vnet_gtpu_add_del_forwarding (&a, &sw_if_index); + +out: + REPLY_MACRO2 (VL_API_GTPU_ADD_DEL_FORWARD_REPLY, + ({ rmp->sw_if_index = ntohl (sw_if_index); })); +} + +static void +vl_api_gtpu_get_transfer_counts_t_handler ( + vl_api_gtpu_get_transfer_counts_t *mp) +{ + vl_api_gtpu_get_transfer_counts_reply_t *rmp; + int rv = 0; + vlib_counter_t result_rx; + vlib_counter_t result_tx; + gtpu_main_t *gtm = 
>pu_main; + u32 count = 0; + u32 sw_if_index; + u32 capacity = ntohl (mp->capacity); + u32 sw_if_index_start = ntohl (mp->sw_if_index_start); + int extra_size = sizeof (rmp->tunnels[0]) * capacity; + + if (sw_if_index_start >= vec_len (gtm->tunnel_index_by_sw_if_index)) + { + capacity = 0; + extra_size = 0; + } + sw_if_index = sw_if_index_start; + + REPLY_MACRO4 ( + VL_API_GTPU_GET_TRANSFER_COUNTS_REPLY, extra_size, ({ + for (; count < capacity; sw_if_index++) + { + if (sw_if_index >= vec_len (gtm->tunnel_index_by_sw_if_index)) + { + // No more tunnels + break; + } + if (~0 == gtm->tunnel_index_by_sw_if_index[sw_if_index]) + { + // Skip inactive/deleted tunnel + continue; + } + rmp->tunnels[count].sw_if_index = htonl (sw_if_index); + rmp->tunnels[count].reserved = 0; + + get_combined_counters (sw_if_index, &result_rx, &result_tx); + rmp->tunnels[count].counters.packets_rx = + clib_net_to_host_u64 (result_rx.packets); + rmp->tunnels[count].counters.packets_tx = + clib_net_to_host_u64 (result_tx.packets); + rmp->tunnels[count].counters.bytes_rx = + clib_net_to_host_u64 (result_rx.bytes); + rmp->tunnels[count].counters.bytes_tx = + clib_net_to_host_u64 (result_tx.bytes); + count++; + } + rmp->count = htonl (count); + })); +} + #include <gtpu/gtpu.api.c> static clib_error_t * gtpu_api_hookup (vlib_main_t * vm) diff --git a/src/plugins/gtpu/gtpu_decap.c b/src/plugins/gtpu/gtpu_decap.c index 40243dbcc53..21e38297ccf 100644 --- a/src/plugins/gtpu/gtpu_decap.c +++ b/src/plugins/gtpu/gtpu_decap.c @@ -26,6 +26,8 @@ typedef struct { u32 tunnel_index; u32 error; u32 teid; + gtpu_header_t header; + u8 forwarding_type; } gtpu_rx_trace_t; static u8 * format_gtpu_rx_trace (u8 * s, va_list * args) @@ -36,14 +38,29 @@ static u8 * format_gtpu_rx_trace (u8 * s, va_list * args) if (t->tunnel_index != ~0) { - s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d", - t->tunnel_index, t->teid, t->next_index, t->error); + s = format (s, "GTPU decap from gtpu_tunnel%d ", 
t->tunnel_index); + switch (t->forwarding_type) + { + case GTPU_FORWARD_BAD_HEADER: + s = format (s, "forwarding bad-header "); + break; + case GTPU_FORWARD_UNKNOWN_TEID: + s = format (s, "forwarding unknown-teid "); + break; + case GTPU_FORWARD_UNKNOWN_TYPE: + s = format (s, "forwarding unknown-type "); + break; + } + s = format (s, "teid %u, ", t->teid); } else { - s = format (s, "GTPU decap error - tunnel for teid %d does not exist", + s = format (s, "GTPU decap error - tunnel for teid %u does not exist, ", t->teid); } + s = format (s, "next %d error %d, ", t->next_index, t->error); + s = format (s, "flags: 0x%x, type: %d, length: %d", t->header.ver_flags, + t->header.type, t->header.length); return s; } @@ -53,6 +70,7 @@ validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4) return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4); } +// Gets run with every input always_inline uword gtpu_input (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -75,28 +93,41 @@ gtpu_input (vlib_main_t * vm, else clib_memset (&last_key6, 0xff, sizeof (last_key6)); + // Where is the framevector coming from from = vlib_frame_vector_args (from_frame); + // number of packets left in frame n_left_from = from_frame->n_vectors; + // whats the next node it needs to go to next_index = node->cached_next_index; + // stats from the next interface stats_sw_if_index = node->runtime_data[0]; + // number of packets processed stats_n_packets = stats_n_bytes = 0; + // run until no more packets left in vectorframe while (n_left_from > 0) { u32 n_left_to_next; + // get vectorframe to process vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + // while there are still more than 4 packets left in frame and more than + // two packets in current frame while (n_left_from >= 4 && n_left_to_next >= 2) { - u32 bi0, bi1; + // buffer index for loading packet data + u32 bi0, bi1; + // vlib packet buffer vlib_buffer_t * b0, * b1; + // next operation to do with the 
packet u32 next0, next1; - ip4_header_t * ip4_0, * ip4_1; - ip6_header_t * ip6_0, * ip6_1; - gtpu_header_t * gtpu0, * gtpu1; - u32 gtpu_hdr_len0, gtpu_hdr_len1; + // IP4 header type + ip4_header_t *ip4_0, *ip4_1; + ip6_header_t *ip6_0, *ip6_1; + gtpu_header_t *gtpu0, *gtpu1; + i32 gtpu_hdr_len0, gtpu_hdr_len1; uword * p0, * p1; u32 tunnel_index0, tunnel_index1; gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL; @@ -106,11 +137,19 @@ gtpu_input (vlib_main_t * vm, u32 sw_if_index0, sw_if_index1, len0, len1; u8 has_space0, has_space1; u8 ver0, ver1; + udp_header_t *udp0, *udp1; + ip_csum_t sum0, sum1; + u32 old0, old1; + gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 }; + gtpu_ext_header_t *ext0, *ext1; + bool is_fast_track0, is_fast_track1; + ext0 = ext1 = &ext; /* Prefetch next iteration. */ { vlib_buffer_t * p2, * p3; + // prefetch 3 and 4 p2 = vlib_get_buffer (vm, from[2]); p3 = vlib_get_buffer (vm, from[3]); @@ -121,57 +160,172 @@ gtpu_input (vlib_main_t * vm, CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); } + // getting buffer index from vectorframe bi0 = from[0]; bi1 = from[1]; + // pre inserting the packets for the next node to_next[0] = bi0; to_next[1] = bi1; + // forward in vectorframe from += 2; + // forward next node to_next += 2; + // decimate message counter for next node n_left_to_next -= 2; + // decimate message counter for current progessing node n_left_from -= 2; + // load packets into buffer b0 = vlib_get_buffer (vm, bi0); b1 = vlib_get_buffer (vm, bi1); /* udp leaves current_data pointing at the gtpu header */ - gtpu0 = vlib_buffer_get_current (b0); - gtpu1 = vlib_buffer_get_current (b1); - if (is_ip4) - { - ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t)); - ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t)); - } - else - { - ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t)); - ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t)); 
- } + // get pointers to the beginnings of the gtpu frame + gtpu0 = vlib_buffer_get_current (b0); + gtpu1 = vlib_buffer_get_current (b1); + if (is_ip4) + { + ip4_0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t) - + sizeof (ip4_header_t)); + ip4_1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t) - + sizeof (ip4_header_t)); + } + else + { + ip6_0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t) - + sizeof (ip6_header_t)); + ip6_1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t) - + sizeof (ip6_header_t)); + } + udp0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t)); + udp1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t)); - tunnel_index0 = ~0; - error0 = 0; + tunnel_index0 = ~0; + error0 = 0; - tunnel_index1 = ~0; - error1 = 0; + tunnel_index1 = ~0; + error1 = 0; - /* speculatively load gtp header version field */ - ver0 = gtpu0->ver_flags; - ver1 = gtpu1->ver_flags; + /* speculatively load gtp header version field */ + ver0 = gtpu0->ver_flags; + ver1 = gtpu1->ver_flags; /* * Manipulate gtpu header * TBD: Manipulate Sequence Number and N-PDU Number * TBD: Manipulate Next Extension Header */ - gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4); - gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4); - - has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); - has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1); - if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0))) + /* Perform all test assuming the packet has the needed space. + * Check if version 1, not PT, not reserved. + * Check message type 255. 
+ */ + is_fast_track0 = + ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track0 = is_fast_track0 & (gtpu0->type == 255); + + is_fast_track1 = + ((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track1 = is_fast_track1 & (gtpu1->type == 255); + + /* Make the header overlap the end of the gtpu_header_t, so + * that it starts with the same Next extension header as the + * gtpu_header_t. + * This means that the gtpu_ext_header_t (ext) has the type + * from the previous header and the length from the current one. + * Works both for the first gtpu_header_t and all following + * gtpu_ext_header_t extensions. + * Copy the ext data if the E bit is set, else use the 0 value. + */ + ext0 = (ver0 & GTPU_E_BIT) ? + (gtpu_ext_header_t *) >pu0->next_ext_type : + &ext; + ext1 = (ver1 & GTPU_E_BIT) ? + (gtpu_ext_header_t *) >pu1->next_ext_type : + &ext; + + /* One or more of the E, S and PN flags are set, so all 3 fields + * must be present: + * The gtpu_header_t contains the Sequence number, N-PDU number and + * Next extension header type. + * If E is not set subtract 4 bytes from the header. + * Then add the length of the extension. 0 * 4 if E is not set, + * else it's the ext->len from the gtp extension. Length is multiple + * of 4 always. + * Note: This length is only valid if the header itself is valid, + * so it must be verified before use. + */ + gtpu_hdr_len0 = sizeof (gtpu_header_t) - + (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext0->len * 4; + gtpu_hdr_len1 = sizeof (gtpu_header_t) - + (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext1->len * 4; + + /* Get the next extension, unconditionally. + * If E was not set in the gtp header ext->len is zero. + * If E was set ext0 will now point to the packet buffer. + * If the gtp packet is illegal this might point outside the buffer. + * TBD check the updated for ext0->type != 0, and continue removing + * extensions. 
Only for clarity, will be optimized away. + */ + ext0 += ext0->len * 4 / sizeof (*ext0); + ext1 += ext1->len * 4 / sizeof (*ext1); + + /* Check the space, if this is true then ext0 points to a valid + * location in the buffer as well. + */ + has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); + has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1); + + /* Diverge the packet paths for 0 and 1 */ + if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0))) { - error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space0))) + { + /* The header or size is wrong */ + error0 = + has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next0 = GTPU_INPUT_NEXT_DROP; + + /* This is an unsupported/bad packet. + * Check if it is to be forwarded. + */ + if (is_ip4) + tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv4; + else + tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv6; + + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; + + goto trace0; + } + /* Correct version and has the space. It can only be unknown + * message type. + */ + error0 = GTPU_ERROR_UNSUPPORTED_TYPE; next0 = GTPU_INPUT_NEXT_DROP; + + /* This is an error/nonstandard packet + * Check if it is to be forwarded. 
*/ + if (is_ip4) + tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv4; + else + tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv6; + + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; + + /* The packet is ipv6/not forwarded */ goto trace0; } @@ -180,22 +334,31 @@ gtpu_input (vlib_main_t * vm, key4_0.src = ip4_0->src_address.as_u32; key4_0.teid = gtpu0->teid; - /* Make sure GTPU tunnel exist according to packet SIP and teid - * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */ - if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64)) - { - p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); - if (PREDICT_FALSE (p0 == NULL)) - { - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace0; - } - last_key4.as_u64 = key4_0.as_u64; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; + /* Make sure GTPU tunnel exist according to packet SourceIP and + * teid SourceIP identify a GTPU path, and teid identify a tunnel + * in a given GTPU path */ + if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64)) + { + p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); + if (PREDICT_FALSE (p0 == NULL)) + { + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + /* This is a standard packet, but no tunnel was found. + * Check if it is to be forwarded. */ + tunnel_index0 = + gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; + goto trace0; + } + last_key4.as_u64 = key4_0.as_u64; + tunnel_index0 = last_tunnel_index = p0[0]; + } + else // when the address of the packet is the same as the packet + // before ... 
saving lookup in table + tunnel_index0 = last_tunnel_index; + // tunnel index in vpp t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); /* Validate GTPU tunnel encap-fib index against packet */ @@ -203,10 +366,13 @@ gtpu_input (vlib_main_t * vm, { error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; goto trace0; } - /* Validate GTPU tunnel SIP against packet DIP */ + /* Validate GTPU tunnel SourceIP against packet DestinationIP */ if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32)) goto next0; /* valid packet */ if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address))) @@ -223,6 +389,9 @@ gtpu_input (vlib_main_t * vm, } error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; goto trace0; } else /* !is_ip4 */ { @@ -239,13 +408,19 @@ gtpu_input (vlib_main_t * vm, { error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; - goto trace0; - } - clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0)); - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; + /* This is a standard packet, but no tunnel was found. + * Check if it is to be forwarded. 
*/ + tunnel_index0 = + gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; + goto trace0; + } + clib_memcpy_fast (&last_key6, &key6_0, sizeof (key6_0)); + tunnel_index0 = last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); /* Validate GTPU tunnel encap-fib index against packet */ @@ -253,6 +428,9 @@ gtpu_input (vlib_main_t * vm, { error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; goto trace0; } @@ -274,28 +452,85 @@ gtpu_input (vlib_main_t * vm, } error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward0; goto trace0; } + forward0: + /* Get the tunnel */ + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + + /* Validate GTPU tunnel encap-fib index against packet */ + if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) + { + error0 = GTPU_ERROR_NO_ERROR_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace0; + } + + /* Clear the error, next0 will be overwritten by the tunnel */ + error0 = 0; + + if (is_ip4) + { + /* Forward packet instead. Push the IP+UDP header */ + gtpu_hdr_len0 = + -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t)); + /* Backup the IP4 checksum and address */ + sum0 = ip4_0->checksum; + old0 = ip4_0->dst_address.as_u32; + + /* Update IP address of the packet using the src from the tunnel + */ + ip4_0->dst_address.as_u32 = t0->src.ip4.as_u32; + + /* Fix the IP4 checksum */ + sum0 = ip_csum_update (sum0, old0, ip4_0->dst_address.as_u32, + ip4_header_t, + dst_address /* changed member */); + ip4_0->checksum = ip_csum_fold (sum0); + } + else + { + /* Forward packet instead. 
Push the IP+UDP header */ + gtpu_hdr_len0 = + -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t)); + /* IPv6 UDP checksum is mandatory */ + int bogus = 0; + udp0->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus); + if (udp0->checksum == 0) + udp0->checksum = 0xffff; + } next0: - /* Pop gtpu header */ + /* Pop/Remove gtpu header from buffered package or push existing + * IP+UDP header back to the buffer*/ vlib_buffer_advance (b0, gtpu_hdr_len0); - next0 = t0->decap_next_index; - sw_if_index0 = t0->sw_if_index; - len0 = vlib_buffer_length_in_chain (vm, b0); + // where does it need to go in the graph next + next0 = t0->decap_next_index; + // interface index the package is on + sw_if_index0 = t0->sw_if_index; + len0 = vlib_buffer_length_in_chain (vm, b0); - /* Required to make the l2 tag push / pop code work on l2 subifs */ - if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT)) - vnet_update_l2_len (b0); + // Next three lines are for forwarding the payload to L2 + // subinterfaces + /* Required to make the l2 tag push / pop code work on l2 subifs */ + if (PREDICT_TRUE (next0 == GTPU_INPUT_NEXT_L2_INPUT)) + vnet_update_l2_len (b0); - /* Set packet input sw_if_index to unicast GTPU tunnel for learning */ - vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; + /* Set packet input sw_if_index to unicast GTPU tunnel for learning + */ + vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0; + // in case its a multicast packet set different interface index sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0; - pkts_decapsulated ++; - stats_n_packets += 1; - stats_n_bytes += len0; + // Update stats + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len0; /* Batch stats increment on the same gtpu tunnel so counter is not incremented per packet */ @@ -324,12 +559,61 @@ gtpu_input (vlib_main_t * vm, tr->error = error0; tr->tunnel_index = tunnel_index0; tr->teid = has_space0 ? 
clib_net_to_host_u32(gtpu0->teid) : ~0; - } - if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1))) + if (vlib_buffer_has_space (b0, 4)) + { + tr->header.ver_flags = gtpu0->ver_flags; + tr->header.type = gtpu0->type; + tr->header.length = clib_net_to_host_u16 (gtpu0->length); + } + } + + /* End of processing for packet 0, start for packet 1 */ + if (PREDICT_FALSE ((!is_fast_track1) | (!has_space1))) { - error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + /* Not fast path. ext1 and gtpu_hdr_len1 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space1))) + { + /* The header or size is wrong */ + error1 = + has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next1 = GTPU_INPUT_NEXT_DROP; + + /* This is an unsupported/bad packet. + * Check if it is to be forwarded. + */ + if (is_ip4) + tunnel_index1 = gtm->bad_header_forward_tunnel_index_ipv4; + else + tunnel_index1 = gtm->bad_header_forward_tunnel_index_ipv6; + + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; + + goto trace1; + } + /* Correct version and has the space. It can only be unknown + * message type. + */ + error1 = GTPU_ERROR_UNSUPPORTED_TYPE; next1 = GTPU_INPUT_NEXT_DROP; + + /* This is an error/nonstandard packet + * Check if it is to be forwarded. 
*/ + if (is_ip4) + tunnel_index1 = gtm->unknown_type_forward_tunnel_index_ipv4; + else + tunnel_index1 = gtm->unknown_type_forward_tunnel_index_ipv6; + + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; + + /* The packet is ipv6/not forwarded */ goto trace1; } @@ -347,20 +631,27 @@ gtpu_input (vlib_main_t * vm, { error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; - goto trace1; - } - last_key4.as_u64 = key4_1.as_u64; - tunnel_index1 = last_tunnel_index = p1[0]; - } - else - tunnel_index1 = last_tunnel_index; - t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); + tunnel_index1 = + gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; + goto trace1; + } + last_key4.as_u64 = key4_1.as_u64; + tunnel_index1 = last_tunnel_index = p1[0]; + } + else + tunnel_index1 = last_tunnel_index; + t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); /* Validate GTPU tunnel encap-fib index against packet */ if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0)) { error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; + tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; goto trace1; } @@ -381,6 +672,9 @@ gtpu_input (vlib_main_t * vm, } error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; + tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; goto trace1; } else /* !is_ip4 */ { @@ -398,21 +692,28 @@ gtpu_input (vlib_main_t * vm, { error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; - goto trace1; - } + tunnel_index1 = + gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; + goto trace1; + } - clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1)); - tunnel_index1 = last_tunnel_index = p1[0]; - } - else - tunnel_index1 = last_tunnel_index; - t1 = pool_elt_at_index (gtm->tunnels, 
tunnel_index1); + clib_memcpy_fast (&last_key6, &key6_1, sizeof (key6_1)); + tunnel_index1 = last_tunnel_index = p1[0]; + } + else + tunnel_index1 = last_tunnel_index; + t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); /* Validate GTPU tunnel encap-fib index against packet */ if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0)) { error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; + tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; goto trace1; } @@ -434,11 +735,63 @@ gtpu_input (vlib_main_t * vm, } error1 = GTPU_ERROR_NO_SUCH_TUNNEL; next1 = GTPU_INPUT_NEXT_DROP; + tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index1 != ~0)) + goto forward1; goto trace1; } + forward1: + + /* Get the tunnel */ + t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); + + /* Validate GTPU tunnel encap-fib index against packet */ + if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0)) + { + error1 = GTPU_ERROR_NO_ERROR_TUNNEL; + next1 = GTPU_INPUT_NEXT_DROP; + goto trace1; + } + + /* Clear the error, next0 will be overwritten by the tunnel */ + error1 = 0; + + if (is_ip4) + { + /* Forward packet instead. Push the IP+UDP header */ + gtpu_hdr_len1 = + -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t)); + + /* Backup the IP4 checksum and address */ + sum1 = ip4_1->checksum; + old1 = ip4_1->dst_address.as_u32; + + /* Update IP address of the packet using the src from the tunnel + */ + ip4_1->dst_address.as_u32 = t1->src.ip4.as_u32; + + /* Fix the IP4 checksum */ + sum1 = ip_csum_update (sum1, old1, ip4_1->dst_address.as_u32, + ip4_header_t, + dst_address /* changed member */); + ip4_1->checksum = ip_csum_fold (sum1); + } + else + { + /* Forward packet instead. 
Push the IP+UDP header */ + gtpu_hdr_len1 = + -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t)); + + /* IPv6 UDP checksum is mandatory */ + int bogus = 0; + udp1->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b1, ip6_1, &bogus); + if (udp1->checksum == 0) + udp1->checksum = 0xffff; + } next1: - /* Pop gtpu header */ + /* Pop gtpu header / push IP+UDP header */ vlib_buffer_advance (b1, gtpu_hdr_len1); next1 = t1->decap_next_index; @@ -484,13 +837,21 @@ gtpu_input (vlib_main_t * vm, tr->error = error1; tr->tunnel_index = tunnel_index1; tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0; - } + if (vlib_buffer_has_space (b1, 4)) + { + tr->header.ver_flags = gtpu1->ver_flags; + tr->header.type = gtpu1->type; + tr->header.length = clib_net_to_host_u16 (gtpu1->length); + } + } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1); } + /* In case there are less than 4 packets left in frame and packets in + current frame aka single processing */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; @@ -499,7 +860,7 @@ gtpu_input (vlib_main_t * vm, ip4_header_t * ip4_0; ip6_header_t * ip6_0; gtpu_header_t * gtpu0; - u32 gtpu_hdr_len0; + i32 gtpu_hdr_len0; uword * p0; u32 tunnel_index0; gtpu_tunnel_t * t0, * mt0 = NULL; @@ -509,6 +870,13 @@ gtpu_input (vlib_main_t * vm, u32 sw_if_index0, len0; u8 has_space0; u8 ver0; + udp_header_t *udp0; + ip_csum_t sum0; + u32 old0; + gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 }; + gtpu_ext_header_t *ext0; + bool is_fast_track0; + ext0 = &ext; bi0 = from[0]; to_next[0] = bi0; @@ -526,112 +894,197 @@ gtpu_input (vlib_main_t * vm, } else { ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t)); } + udp0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t)); - tunnel_index0 = ~0; - error0 = 0; - - /* speculatively load gtp header version field */ - ver0 = gtpu0->ver_flags; + tunnel_index0 = ~0; + error0 = 0; + /* speculatively load gtp 
header version field */ + ver0 = gtpu0->ver_flags; /* * Manipulate gtpu header * TBD: Manipulate Sequence Number and N-PDU Number * TBD: Manipulate Next Extension Header */ - gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4); - has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); + is_fast_track0 = + ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track0 = is_fast_track0 & (gtpu0->type == 255); - if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0))) - { - error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } + ext0 = (ver0 & GTPU_E_BIT) ? + (gtpu_ext_header_t *) >pu0->next_ext_type : + &ext; - if (is_ip4) { - key4_0.src = ip4_0->src_address.as_u32; - key4_0.teid = gtpu0->teid; + gtpu_hdr_len0 = sizeof (gtpu_header_t) - + (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext0->len * 4; - /* Make sure GTPU tunnel exist according to packet SIP and teid - * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */ - if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64)) - { - p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); - if (PREDICT_FALSE (p0 == NULL)) - { - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } - last_key4.as_u64 = key4_0.as_u64; - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + ext0 += ext0->len * 4 / sizeof (*ext0); - /* Validate GTPU tunnel encap-fib index against packet */ - if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) - { - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } + has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); - /* Validate GTPU tunnel SIP against packet DIP */ - if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32)) - goto next00; /* valid 
packet */ - if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address))) - { - key4_0.src = ip4_0->dst_address.as_u32; - key4_0.teid = gtpu0->teid; - /* Make sure mcast GTPU tunnel exist by packet DIP and teid */ - p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); - if (PREDICT_TRUE (p0 != NULL)) - { - mt0 = pool_elt_at_index (gtm->tunnels, p0[0]); - goto next00; /* valid packet */ - } - } - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; + if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0))) + { + /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space0))) + { + /* The header or size is wrong */ + error0 = + has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next0 = GTPU_INPUT_NEXT_DROP; + + /* This is an unsupported/bad packet. + * Check if it is to be forwarded. 
+ */ + if (is_ip4) + tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv4; + else + tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv6; - } else /* !is_ip4 */ { - key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0]; - key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1]; - key6_0.teid = gtpu0->teid; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; - /* Make sure GTPU tunnel exist according to packet SIP and teid - * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */ - if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0)) - { - p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0); - if (PREDICT_FALSE (p0 == NULL)) - { - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } - clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0)); - tunnel_index0 = last_tunnel_index = p0[0]; - } - else - tunnel_index0 = last_tunnel_index; - t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + goto trace00; + } + /* Correct version and has the space. It can only be unknown + * message type + */ + error0 = GTPU_ERROR_UNSUPPORTED_TYPE; + next0 = GTPU_INPUT_NEXT_DROP; - /* Validate GTPU tunnel encap-fib index against packet */ - if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) - { - error0 = GTPU_ERROR_NO_SUCH_TUNNEL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } + /* This is an error/nonstandard packet + * Check if it is to be forwarded. 
*/ + if (is_ip4) + tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv4; + else + tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv6; - /* Validate GTPU tunnel SIP against packet DIP */ - if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, - &t0->src.ip6))) + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + + /* The packet is ipv6/not forwarded */ + goto trace00; + } + + if (is_ip4) + { + key4_0.src = ip4_0->src_address.as_u32; + key4_0.teid = gtpu0->teid; + + /* Make sure GTPU tunnel exist according to packet SIP and teid + * SIP identify a GTPU path, and teid identify a tunnel in a + * given GTPU path */ + if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64)) + { + // Cache miss, so try normal lookup now. + p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); + if (PREDICT_FALSE (p0 == NULL)) + { + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + + /* This is a standard packet, but no tunnel was found. + * Check if it is to be forwarded. 
*/ + tunnel_index0 = + gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + goto trace00; + } + // Update the key/tunnel cache for normal packets + last_key4.as_u64 = key4_0.as_u64; + tunnel_index0 = last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + + /* Validate GTPU tunnel encap-fib index against packet */ + if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) + { + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + goto trace00; + } + + /* Validate GTPU tunnel SIP against packet DIP */ + if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == + t0->src.ip4.as_u32)) + goto next00; /* valid packet */ + if (PREDICT_FALSE ( + ip4_address_is_multicast (&ip4_0->dst_address))) + { + key4_0.src = ip4_0->dst_address.as_u32; + key4_0.teid = gtpu0->teid; + /* Make sure mcast GTPU tunnel exist by packet DIP and teid + */ + p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64); + if (PREDICT_TRUE (p0 != NULL)) + { + mt0 = pool_elt_at_index (gtm->tunnels, p0[0]); + goto next00; /* valid packet */ + } + } + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + goto trace00; + } + else /* !is_ip4 */ + { + key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0]; + key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1]; + key6_0.teid = gtpu0->teid; + + /* Make sure GTPU tunnel exist according to packet SIP and teid + * SIP identify a GTPU path, and teid identify a tunnel in a + * given GTPU path */ + if (PREDICT_FALSE ( + memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0)) + { + p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0); + if (PREDICT_FALSE (p0 == NULL)) + 
{ + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = + gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + goto trace00; + } + clib_memcpy_fast (&last_key6, &key6_0, sizeof (key6_0)); + tunnel_index0 = last_tunnel_index = p0[0]; + } + else + tunnel_index0 = last_tunnel_index; + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + + /* Validate GTPU tunnel encap-fib index against packet */ + if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) + { + error0 = GTPU_ERROR_NO_SUCH_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; + goto trace00; + } + + /* Validate GTPU tunnel SIP against packet DIP */ + if (PREDICT_TRUE ( + ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6))) goto next00; /* valid packet */ if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address))) { @@ -647,11 +1100,63 @@ gtpu_input (vlib_main_t * vm, } error0 = GTPU_ERROR_NO_SUCH_TUNNEL; next0 = GTPU_INPUT_NEXT_DROP; + tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6; + if (PREDICT_FALSE (tunnel_index0 != ~0)) + goto forward00; goto trace00; - } + } + + /* This can only be reached via goto */ + forward00: + // Get the tunnel + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + + /* Validate GTPU tunnel encap-fib index against packet */ + if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0)) + { + error0 = GTPU_ERROR_NO_ERROR_TUNNEL; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace00; + } + + /* Clear the error, next0 will be overwritten by the tunnel */ + error0 = 0; + + if (is_ip4) + { + /* Forward packet instead. 
Push the IP+UDP header */ + gtpu_hdr_len0 = + -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t)); + /* Backup the IP4 checksum and address */ + sum0 = ip4_0->checksum; + old0 = ip4_0->dst_address.as_u32; + + /* Update IP address of the packet using the src from the tunnel + */ + ip4_0->dst_address.as_u32 = t0->src.ip4.as_u32; + + /* Fix the IP4 checksum */ + sum0 = ip_csum_update (sum0, old0, ip4_0->dst_address.as_u32, + ip4_header_t, + dst_address /* changed member */); + ip4_0->checksum = ip_csum_fold (sum0); + } + else + { + /* Forward packet instead. Push the IP+UDP header */ + gtpu_hdr_len0 = + -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t)); + + /* IPv6 UDP checksum is mandatory */ + int bogus = 0; + udp0->checksum = + ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus); + if (udp0->checksum == 0) + udp0->checksum = 0xffff; + } next00: - /* Pop gtpu header */ + /* Pop gtpu header / push IP+UDP header */ vlib_buffer_advance (b0, gtpu_hdr_len0); next0 = t0->decap_next_index; @@ -697,7 +1202,13 @@ gtpu_input (vlib_main_t * vm, tr->error = error0; tr->tunnel_index = tunnel_index0; tr->teid = has_space0 ? 
clib_net_to_host_u32(gtpu0->teid) : ~0; - } + if (vlib_buffer_has_space (b0, 4)) + { + tr->header.ver_flags = gtpu0->ver_flags; + tr->header.type = gtpu0->type; + tr->header.length = clib_net_to_host_u16 (gtpu0->length); + } + } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); @@ -790,6 +1301,8 @@ typedef enum { IP_GTPU_BYPASS_N_NEXT, } ip_vxan_bypass_next_t; +/* this function determines if a udp packet is actually gtpu and needs + forwarding to gtpu_input */ always_inline uword ip_gtpu_bypass_inline (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -1356,128 +1869,183 @@ gtpu_flow_input (vlib_main_t * vm, u32 sw_if_index0, sw_if_index1, len0, len1; u8 has_space0 = 0, has_space1 = 0; u8 ver0, ver1; + gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 }; + gtpu_ext_header_t *ext0, *ext1; + bool is_fast_track0, is_fast_track1; + ext0 = ext1 = &ext; - /* Prefetch next iteration. */ - { - vlib_buffer_t * p2, * p3; + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t *p2, *p3; - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); - CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD); - } + CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD); + } - bi0 = from[0]; - bi1 = from[1]; - to_next[0] = bi0; - to_next[1] = bi1; - from += 2; - to_next += 2; - n_left_to_next -= 2; - n_left_from -= 2; + bi0 = from[0]; + bi1 = from[1]; + to_next[0] = bi0; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_to_next -= 2; + n_left_from -= 2; - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); - /* udp leaves current_data pointing at the gtpu header */ - gtpu0 = vlib_buffer_get_current (b0); - gtpu1 = vlib_buffer_get_current (b1); + /* udp leaves current_data pointing at the gtpu header */ + gtpu0 = vlib_buffer_get_current (b0); + gtpu1 = vlib_buffer_get_current (b1); - len0 = vlib_buffer_length_in_chain (vm, b0); - len1 = vlib_buffer_length_in_chain (vm, b1); + len0 = vlib_buffer_length_in_chain (vm, b0); + len1 = vlib_buffer_length_in_chain (vm, b1); - tunnel_index0 = ~0; - error0 = 0; - - tunnel_index1 = ~0; - error1 = 0; - - ip_err0 = gtpu_check_ip (b0, len0); - udp_err0 = gtpu_check_ip_udp_len (b0); - ip_err1 = gtpu_check_ip (b1, len1); - udp_err1 = gtpu_check_ip_udp_len (b1); - - if (PREDICT_FALSE (gtpu_local_need_csum_check (b0))) - csum_err0 = !gtpu_validate_udp_csum (vm, b0); - else - csum_err0 = !gtpu_local_csum_is_valid (b0); - if (PREDICT_FALSE (gtpu_local_need_csum_check (b1))) - csum_err1 = !gtpu_validate_udp_csum (vm, b1); - else 
- csum_err1 = !gtpu_local_csum_is_valid (b1); - - if (ip_err0 || udp_err0 || csum_err0) - { - next0 = GTPU_INPUT_NEXT_DROP; - error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); - goto trace0; - } - - /* speculatively load gtp header version field */ - ver0 = gtpu0->ver_flags; - - /* - * Manipulate gtpu header - * TBD: Manipulate Sequence Number and N-PDU Number - * TBD: Manipulate Next Extension Header - */ - gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4); - - has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); - if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0))) - { - error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace0; - } - - /* Manipulate packet 0 */ - ASSERT (b0->flow_id != 0); - tunnel_index0 = b0->flow_id - gtm->flow_id_start; - t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); - b0->flow_id = 0; - - /* Pop gtpu header */ - vlib_buffer_advance (b0, gtpu_hdr_len0); - - /* assign the next node */ - if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) && - (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) - { - error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace0; - } - next0 = t0->decap_next_index; + tunnel_index0 = ~0; + error0 = 0; - sw_if_index0 = t0->sw_if_index; + tunnel_index1 = ~0; + error1 = 0; - /* Set packet input sw_if_index to unicast GTPU tunnel for learning */ - vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; + ip_err0 = gtpu_check_ip (b0, len0); + udp_err0 = gtpu_check_ip_udp_len (b0); + ip_err1 = gtpu_check_ip (b1, len1); + udp_err1 = gtpu_check_ip_udp_len (b1); - pkts_decapsulated ++; - stats_n_packets += 1; - stats_n_bytes += len0; + if (PREDICT_FALSE (gtpu_local_need_csum_check (b0))) + csum_err0 = !gtpu_validate_udp_csum (vm, b0); + else + csum_err0 = !gtpu_local_csum_is_valid (b0); + if (PREDICT_FALSE (gtpu_local_need_csum_check (b1))) + csum_err1 = 
!gtpu_validate_udp_csum (vm, b1); + else + csum_err1 = !gtpu_local_csum_is_valid (b1); - /* Batch stats increment on the same gtpu tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter - (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - thread_index, stats_sw_if_index, - stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } + /* speculatively load gtp header version field */ + ver0 = gtpu0->ver_flags; + ver1 = gtpu1->ver_flags; + + /* + * Manipulate gtpu header + * TBD: Manipulate Sequence Number and N-PDU Number + * TBD: Manipulate Next Extension Header + */ + is_fast_track0 = + ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track0 = is_fast_track0 & (gtpu0->type == 255); + + is_fast_track1 = + ((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track1 = is_fast_track1 & (gtpu1->type == 255); + + ext0 = (ver0 & GTPU_E_BIT) ? + (gtpu_ext_header_t *) >pu0->next_ext_type : + &ext; + ext1 = (ver1 & GTPU_E_BIT) ? 
+ (gtpu_ext_header_t *) >pu1->next_ext_type : + &ext; + + gtpu_hdr_len0 = sizeof (gtpu_header_t) - + (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext0->len * 4; + gtpu_hdr_len1 = sizeof (gtpu_header_t) - + (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext1->len * 4; + + /* Only for clarity, will be optimized away */ + ext0 += ext0->len * 4 / sizeof (*ext0); + ext1 += ext1->len * 4 / sizeof (*ext1); + + has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); + has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1); + + if (ip_err0 || udp_err0 || csum_err0) + { + next0 = GTPU_INPUT_NEXT_DROP; + error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); + goto trace0; + } + + /* Diverge the packet paths for 0 and 1 */ + if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0))) + { + /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space0))) + { + /* The header or size is wrong */ + error0 = + has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace0; + } + /* Correct version and has the space. It can only be unknown + * message type. 
+ */ + error0 = GTPU_ERROR_UNSUPPORTED_TYPE; + next0 = GTPU_INPUT_NEXT_DROP; + + /* The packet is not forwarded */ + goto trace0; + } + + /* Manipulate packet 0 */ + ASSERT (b0->flow_id != 0); + tunnel_index0 = b0->flow_id - gtm->flow_id_start; + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + b0->flow_id = 0; + + /* Pop gtpu header */ + vlib_buffer_advance (b0, gtpu_hdr_len0); + + /* assign the next node */ + if (PREDICT_FALSE (t0->decap_next_index != + GTPU_INPUT_NEXT_IP4_INPUT) && + (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) + { + error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace0; + } + next0 = t0->decap_next_index; + + sw_if_index0 = t0->sw_if_index; + + /* Set packet input sw_if_index to unicast GTPU tunnel for learning + */ + vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0; + + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len0; + + /* Batch stats increment on the same gtpu tunnel so counter + is not incremented per packet */ + if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len0; + if (stats_n_packets) + vlib_increment_combined_counter ( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + thread_index, stats_sw_if_index, stats_n_packets, + stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len0; + stats_sw_if_index = sw_if_index0; + } trace0: b0->error = error0 ? node->errors[error0] : 0; @@ -1490,81 +2058,103 @@ trace0: tr->error = error0; tr->tunnel_index = tunnel_index0; tr->teid = has_space0 ? 
clib_net_to_host_u32(gtpu0->teid) : ~0; - } + if (vlib_buffer_has_space (b0, 4)) + { + tr->header.ver_flags = gtpu0->ver_flags; + tr->header.type = gtpu0->type; + tr->header.length = clib_net_to_host_u16 (gtpu0->length); + } + } - if (ip_err1 || udp_err1 || csum_err1) - { - next1 = GTPU_INPUT_NEXT_DROP; - error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1); - goto trace1; - } + if (ip_err1 || udp_err1 || csum_err1) + { + next1 = GTPU_INPUT_NEXT_DROP; + error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1); + goto trace1; + } - /* speculatively load gtp header version field */ - ver1 = gtpu1->ver_flags; + /* + * Manipulate gtpu header + * TBD: Manipulate Sequence Number and N-PDU Number + * TBD: Manipulate Next Extension Header + */ + if (PREDICT_FALSE ((!is_fast_track1) | (!has_space1))) + { + /* Not fast path. ext1 and gtpu_hdr_len1 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space1))) + { + /* The header or size is wrong */ + error1 = + has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next1 = GTPU_INPUT_NEXT_DROP; + goto trace1; + } + /* Correct version and has the space. It can only be unknown + * message type. + */ + error1 = GTPU_ERROR_UNSUPPORTED_TYPE; + next1 = GTPU_INPUT_NEXT_DROP; - /* - * Manipulate gtpu header - * TBD: Manipulate Sequence Number and N-PDU Number - * TBD: Manipulate Next Extension Header - */ - gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4); - has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1); - if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1))) - { - error1 = has_space1 ? 
GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; - next1 = GTPU_INPUT_NEXT_DROP; - goto trace1; - } + /* The packet is not forwarded */ + goto trace1; + } - /* Manipulate packet 1 */ - ASSERT (b1->flow_id != 0); - tunnel_index1 = b1->flow_id - gtm->flow_id_start; - t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); - b1->flow_id = 0; - - /* Pop gtpu header */ - vlib_buffer_advance (b1, gtpu_hdr_len1); - - /* assign the next node */ - if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) && - (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) - { - error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; - next1 = GTPU_INPUT_NEXT_DROP; - goto trace1; - } - next1 = t1->decap_next_index; + /* Manipulate packet 1 */ + ASSERT (b1->flow_id != 0); + tunnel_index1 = b1->flow_id - gtm->flow_id_start; + t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1); + b1->flow_id = 0; - sw_if_index1 = t1->sw_if_index; + /* Pop gtpu header */ + vlib_buffer_advance (b1, gtpu_hdr_len1); - /* Required to make the l2 tag push / pop code work on l2 subifs */ - /* This won't happen in current implementation as only - ipv4/udp/gtpu/IPV4 type packets can be matched */ - if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT)) - vnet_update_l2_len (b1); + /* assign the next node */ + if (PREDICT_FALSE (t1->decap_next_index != + GTPU_INPUT_NEXT_IP4_INPUT) && + (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) + { + error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; + next1 = GTPU_INPUT_NEXT_DROP; + goto trace1; + } + next1 = t1->decap_next_index; - /* Set packet input sw_if_index to unicast GTPU tunnel for learning */ - vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1; + sw_if_index1 = t1->sw_if_index; - pkts_decapsulated ++; - stats_n_packets += 1; - stats_n_bytes += len1; + /* Required to make the l2 tag push / pop code work on l2 subifs */ + /* This won't happen in current implementation as only + ipv4/udp/gtpu/IPV4 type packets can be matched */ + if (PREDICT_FALSE (next1 == 
GTPU_INPUT_NEXT_L2_INPUT)) + vnet_update_l2_len (b1); + + /* Set packet input sw_if_index to unicast GTPU tunnel for learning + */ + vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1; - /* Batch stats increment on the same gtpu tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len1; - if (stats_n_packets) - vlib_increment_combined_counter - (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - thread_index, stats_sw_if_index, - stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len1; - stats_sw_if_index = sw_if_index1; - } + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len1; + + /* Batch stats increment on the same gtpu tunnel so counter + is not incremented per packet */ + if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len1; + if (stats_n_packets) + vlib_increment_combined_counter ( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + thread_index, stats_sw_if_index, stats_n_packets, + stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len1; + stats_sw_if_index = sw_if_index1; + } trace1: b1->error = error1 ? node->errors[error1] : 0; @@ -1577,12 +2167,18 @@ trace1: tr->error = error1; tr->tunnel_index = tunnel_index1; tr->teid = has_space1 ? 
clib_net_to_host_u32(gtpu1->teid) : ~0; - } + if (vlib_buffer_has_space (b1, 4)) + { + tr->header.ver_flags = gtpu1->ver_flags; + tr->header.type = gtpu1->type; + tr->header.length = clib_net_to_host_u16 (gtpu1->length); + } + } - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, next0, + next1); +} while (n_left_from > 0 && n_left_to_next > 0) { @@ -1597,97 +2193,135 @@ trace1: u32 sw_if_index0, len0; u8 has_space0 = 0; u8 ver0; + gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 }; + gtpu_ext_header_t *ext0; + bool is_fast_track0; + ext0 = &ext; - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - len0 = vlib_buffer_length_in_chain (vm, b0); - - tunnel_index0 = ~0; - error0 = 0; - - ip_err0 = gtpu_check_ip (b0, len0); - udp_err0 = gtpu_check_ip_udp_len (b0); - if (PREDICT_FALSE (gtpu_local_need_csum_check (b0))) - csum_err0 = !gtpu_validate_udp_csum (vm, b0); - else - csum_err0 = !gtpu_local_csum_is_valid (b0); - - if (ip_err0 || udp_err0 || csum_err0) - { - next0 = GTPU_INPUT_NEXT_DROP; - error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); - goto trace00; - } + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; - /* udp leaves current_data pointing at the gtpu header */ - gtpu0 = vlib_buffer_get_current (b0); + b0 = vlib_get_buffer (vm, bi0); + len0 = vlib_buffer_length_in_chain (vm, b0); - /* speculatively load gtp header version field */ - ver0 = gtpu0->ver_flags; + tunnel_index0 = ~0; + error0 = 0; - /* - * Manipulate gtpu header - * TBD: Manipulate Sequence Number and N-PDU Number - * TBD: Manipulate Next Extension Header - */ - gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4); + ip_err0 = gtpu_check_ip (b0, len0); + 
udp_err0 = gtpu_check_ip_udp_len (b0); + if (PREDICT_FALSE (gtpu_local_need_csum_check (b0))) + csum_err0 = !gtpu_validate_udp_csum (vm, b0); + else + csum_err0 = !gtpu_local_csum_is_valid (b0); - has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); - if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0))) - { - error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } - - ASSERT (b0->flow_id != 0); - tunnel_index0 = b0->flow_id - gtm->flow_id_start; - t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); - b0->flow_id = 0; - - /* Pop gtpu header */ - vlib_buffer_advance (b0, gtpu_hdr_len0); - - /* assign the next node */ - if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) && - (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) - { - error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; - next0 = GTPU_INPUT_NEXT_DROP; - goto trace00; - } - next0 = t0->decap_next_index; + /* udp leaves current_data pointing at the gtpu header */ + gtpu0 = vlib_buffer_get_current (b0); - sw_if_index0 = t0->sw_if_index; + /* speculatively load gtp header version field */ + ver0 = gtpu0->ver_flags; - /* Set packet input sw_if_index to unicast GTPU tunnel for learning */ - vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0; + /* + * Manipulate gtpu header + * TBD: Manipulate Sequence Number and N-PDU Number + * TBD: Manipulate Next Extension Header + */ + is_fast_track0 = + ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)); + is_fast_track0 = is_fast_track0 & (gtpu0->type == 255); + + ext0 = (ver0 & GTPU_E_BIT) ? 
+ (gtpu_ext_header_t *) >pu0->next_ext_type : + &ext; + + gtpu_hdr_len0 = sizeof (gtpu_header_t) - + (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) + + ext0->len * 4; + ext0 += ext0->len * 4 / sizeof (*ext0); + + has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0); + + if (ip_err0 || udp_err0 || csum_err0) + { + next0 = GTPU_INPUT_NEXT_DROP; + error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); + goto trace00; + } - pkts_decapsulated ++; - stats_n_packets += 1; - stats_n_bytes += len0; + if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0))) + { + /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */ + + /* GCC will hopefully fix the duplicate compute */ + if (PREDICT_FALSE ( + !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) == + (GTPU_V1_VER | GTPU_PT_BIT)) | + (!has_space0))) + { + /* The header or size is wrong */ + error0 = + has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace00; + } + /* Correct version and has the space. It can only be unknown + * message type. 
+ */ + error0 = GTPU_ERROR_UNSUPPORTED_TYPE; + next0 = GTPU_INPUT_NEXT_DROP; - /* Batch stats increment on the same gtpu tunnel so counter - is not incremented per packet */ - if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index)) - { - stats_n_packets -= 1; - stats_n_bytes -= len0; - if (stats_n_packets) - vlib_increment_combined_counter - (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - thread_index, stats_sw_if_index, - stats_n_packets, stats_n_bytes); - stats_n_packets = 1; - stats_n_bytes = len0; - stats_sw_if_index = sw_if_index0; - } + /* The packet is not forwarded */ + goto trace00; + } + + ASSERT (b0->flow_id != 0); + tunnel_index0 = b0->flow_id - gtm->flow_id_start; + t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0); + b0->flow_id = 0; + + /* Pop gtpu header */ + vlib_buffer_advance (b0, gtpu_hdr_len0); + + /* assign the next node */ + if (PREDICT_FALSE (t0->decap_next_index != + GTPU_INPUT_NEXT_IP4_INPUT) && + (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)) + { + error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR; + next0 = GTPU_INPUT_NEXT_DROP; + goto trace00; + } + next0 = t0->decap_next_index; + + sw_if_index0 = t0->sw_if_index; + + /* Set packet input sw_if_index to unicast GTPU tunnel for learning + */ + vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0; + + pkts_decapsulated++; + stats_n_packets += 1; + stats_n_bytes += len0; + + /* Batch stats increment on the same gtpu tunnel so counter + is not incremented per packet */ + if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index)) + { + stats_n_packets -= 1; + stats_n_bytes -= len0; + if (stats_n_packets) + vlib_increment_combined_counter ( + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, + thread_index, stats_sw_if_index, stats_n_packets, + stats_n_bytes); + stats_n_packets = 1; + stats_n_bytes = len0; + stats_sw_if_index = sw_if_index0; + } trace00: b0->error = error0 ? 
node->errors[error0] : 0; @@ -1699,11 +2333,16 @@ trace1: tr->error = error0; tr->tunnel_index = tunnel_index0; tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } + if (vlib_buffer_has_space (b0, 4)) + { + tr->header.ver_flags = gtpu0->ver_flags; + tr->header.type = gtpu0->type; + tr->header.length = clib_net_to_host_u16 (gtpu0->length); + } + } + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } diff --git a/src/plugins/gtpu/gtpu_encap.c b/src/plugins/gtpu/gtpu_encap.c index 4b7d98786f4..2c3c46a4be2 100644 --- a/src/plugins/gtpu/gtpu_encap.c +++ b/src/plugins/gtpu/gtpu_encap.c @@ -199,7 +199,8 @@ gtpu_encap_inline (vlib_main_t * vm, copy_dst3 = (u64 *) ip4_3; copy_src3 = (u64 *) t3->rewrite; - /* Copy first 32 octets 8-bytes at a time */ + /* Copy first 32 octets 8-bytes at a time (minimum size) + * TODO: check if clib_memcpy_fast is better */ #define _(offs) copy_dst0[offs] = copy_src0[offs]; foreach_fixed_header4_offset; #undef _ @@ -212,19 +213,83 @@ gtpu_encap_inline (vlib_main_t * vm, #define _(offs) copy_dst3[offs] = copy_src3[offs]; foreach_fixed_header4_offset; #undef _ - /* Last 4 octets. Hopefully gcc will be our friend */ - copy_dst_last0 = (u32 *)(©_dst0[4]); - copy_src_last0 = (u32 *)(©_src0[4]); - copy_dst_last0[0] = copy_src_last0[0]; - copy_dst_last1 = (u32 *)(©_dst1[4]); - copy_src_last1 = (u32 *)(©_src1[4]); - copy_dst_last1[0] = copy_src_last1[0]; - copy_dst_last2 = (u32 *)(©_dst2[4]); - copy_src_last2 = (u32 *)(©_src2[4]); - copy_dst_last2[0] = copy_src_last2[0]; - copy_dst_last3 = (u32 *)(©_dst3[4]); - copy_src_last3 = (u32 *)(©_src3[4]); - copy_dst_last3[0] = copy_src_last3[0]; + + /* Copy last octets */ + if (_vec_len (t0->rewrite) == 36) + { + /* Last 4 octets. 
Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *) (©_dst0[4]); + copy_src_last0 = (u32 *) (©_src0[4]); + copy_dst_last0[0] = copy_src_last0[0]; + } + else + { + /* Near last 8 octets. */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + _ (4); +#undef _ + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *) (©_dst0[5]); + copy_src_last0 = (u32 *) (©_src0[5]); + copy_dst_last0[0] = copy_src_last0[0]; + } + + if (_vec_len (t1->rewrite) == 36) + { + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last1 = (u32 *) (©_dst1[4]); + copy_src_last1 = (u32 *) (©_src1[4]); + copy_dst_last1[0] = copy_src_last1[0]; + } + else + { + /* Near last 8 octets. */ +#define _(offs) copy_dst1[offs] = copy_src1[offs]; + _ (4); +#undef _ + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last1 = (u32 *) (©_dst1[5]); + copy_src_last1 = (u32 *) (©_src1[5]); + copy_dst_last1[0] = copy_src_last1[0]; + } + + if (_vec_len (t2->rewrite) == 36) + { + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last2 = (u32 *) (©_dst2[4]); + copy_src_last2 = (u32 *) (©_src2[4]); + copy_dst_last2[0] = copy_src_last2[0]; + } + else + { + /* Near last 8 octets. */ +#define _(offs) copy_dst2[offs] = copy_src2[offs]; + _ (4); +#undef _ + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last2 = (u32 *) (©_dst2[5]); + copy_src_last2 = (u32 *) (©_src2[5]); + copy_dst_last2[0] = copy_src_last2[0]; + } + + if (_vec_len (t3->rewrite) == 36) + { + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last3 = (u32 *) (©_dst3[4]); + copy_src_last3 = (u32 *) (©_src3[4]); + copy_dst_last3[0] = copy_src_last3[0]; + } + else + { + /* Near last 8 octets. */ +#define _(offs) copy_dst3[offs] = copy_src3[offs]; + _ (4); +#undef _ + /* Last 4 octets. 
Hopefully gcc will be our friend */ + copy_dst_last3 = (u32 *) (&copy_dst3[5]); + copy_src_last3 = (u32 *) (&copy_src3[5]); + copy_dst_last3[0] = copy_src_last3[0]; + } /* Fix the IP4 checksum and length */ sum0 = ip4_0->checksum; @@ -318,7 +383,7 @@ gtpu_encap_inline (vlib_main_t * vm, copy_src2 = (u64 *) t2->rewrite; copy_dst3 = (u64 *) ip6_3; copy_src3 = (u64 *) t3->rewrite; - /* Copy first 56 (ip6) octets 8-bytes at a time */ + /* Copy first 56 (ip6) octets 8-bytes at a time (minimum size) */ #define _(offs) copy_dst0[offs] = copy_src0[offs]; foreach_fixed_header6_offset; #undef _ @@ -331,6 +396,40 @@ gtpu_encap_inline (vlib_main_t * vm, #define _(offs) copy_dst3[offs] = copy_src3[offs]; foreach_fixed_header6_offset; #undef _ + + /* Copy last octets */ + if (_vec_len (t0->rewrite) == 64) + { + /* Last 8 octets. */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + _ (7); +#undef _ + } + + if (_vec_len (t1->rewrite) == 64) + { + /* Last 8 octets. */ +#define _(offs) copy_dst1[offs] = copy_src1[offs]; + _ (7); +#undef _ + } + + if (_vec_len (t2->rewrite) == 64) + { + /* Last 8 octets. */ +#define _(offs) copy_dst2[offs] = copy_src2[offs]; + _ (7); +#undef _ + } + + if (_vec_len (t3->rewrite) == 64) + { + /* Last 8 octets. 
*/ +#define _(offs) copy_dst3[offs] = copy_src3[offs]; + _ (7); +#undef _ + } + /* Fix IP6 payload length */ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) @@ -466,15 +565,19 @@ gtpu_encap_inline (vlib_main_t * vm, vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->tunnel_index = t0 - gtm->tunnels; tr->tteid = t0->tteid; - } + tr->pdu_extension = t0->pdu_extension; + tr->qfi = t0->qfi; + } - if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) - { - gtpu_encap_trace_t *tr = - vlib_add_trace (vm, node, b1, sizeof (*tr)); - tr->tunnel_index = t1 - gtm->tunnels; - tr->tteid = t1->tteid; - } + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + gtpu_encap_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->tunnel_index = t1 - gtm->tunnels; + tr->tteid = t1->tteid; + tr->pdu_extension = t1->pdu_extension; + tr->qfi = t1->qfi; + } if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED)) { @@ -482,15 +585,19 @@ gtpu_encap_inline (vlib_main_t * vm, vlib_add_trace (vm, node, b2, sizeof (*tr)); tr->tunnel_index = t2 - gtm->tunnels; tr->tteid = t2->tteid; - } + tr->pdu_extension = t2->pdu_extension; + tr->qfi = t2->qfi; + } - if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED)) - { - gtpu_encap_trace_t *tr = - vlib_add_trace (vm, node, b3, sizeof (*tr)); - tr->tunnel_index = t3 - gtm->tunnels; - tr->tteid = t3->tteid; - } + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + gtpu_encap_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + tr->tunnel_index = t3 - gtm->tunnels; + tr->tteid = t3->tteid; + tr->pdu_extension = t3->pdu_extension; + tr->qfi = t3->qfi; + } vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next, @@ -532,8 +639,9 @@ gtpu_encap_inline (vlib_main_t * vm, next0 = t0->next_dpo.dpoi_next_node; vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index; - /* Apply the rewrite string. $$$$ vnet_rewrite? 
*/ - vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite)); + /* Apply the rewrite string. $$$$ vnet_rewrite. + * The correct total size is set in ip_udp_gtpu_rewrite() */ + vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite)); if (is_ip4) { @@ -546,10 +654,26 @@ gtpu_encap_inline (vlib_main_t * vm, #define _(offs) copy_dst0[offs] = copy_src0[offs]; foreach_fixed_header4_offset; #undef _ - /* Last 4 octets. Hopefully gcc will be our friend */ - copy_dst_last0 = (u32 *)(&copy_dst0[4]); - copy_src_last0 = (u32 *)(&copy_src0[4]); - copy_dst_last0[0] = copy_src_last0[0]; + + /* Copy last octets */ + if (_vec_len (t0->rewrite) == 36) + { + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *) (&copy_dst0[4]); + copy_src_last0 = (u32 *) (&copy_src0[4]); + copy_dst_last0[0] = copy_src_last0[0]; + } + else + { + /* Near last 8 octets. */ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + _ (4); +#undef _ + /* Last 4 octets. Hopefully gcc will be our friend */ + copy_dst_last0 = (u32 *) (&copy_dst0[5]); + copy_src_last0 = (u32 *) (&copy_src0[5]); + copy_dst_last0[0] = copy_src_last0[0]; + } /* Fix the IP4 checksum and length */ sum0 = ip4_0->checksum; @@ -587,6 +711,16 @@ gtpu_encap_inline (vlib_main_t * vm, #define _(offs) copy_dst0[offs] = copy_src0[offs]; foreach_fixed_header6_offset; #undef _ + + /* Copy last octets */ + if (_vec_len (t0->rewrite) == 64) + { + /* Last 8 octets. 
*/ +#define _(offs) copy_dst0[offs] = copy_src0[offs]; + _ (7); +#undef _ + } + /* Fix IP6 payload length */ new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) @@ -600,9 +734,9 @@ gtpu_encap_inline (vlib_main_t * vm, /* Fix GTPU length */ gtpu0 = (gtpu_header_t *)(udp0+1); - new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0) - - sizeof (*ip4_0) - sizeof(*udp0) - - GTPU_V1_HDR_LEN); + new_l0 = clib_host_to_net_u16 ( + vlib_buffer_length_in_chain (vm, b0) - sizeof (*ip6_0) - + sizeof (*udp0) - GTPU_V1_HDR_LEN); gtpu0->length = new_l0; /* IPv6 UDP checksum is mandatory */ @@ -644,7 +778,9 @@ gtpu_encap_inline (vlib_main_t * vm, vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->tunnel_index = t0 - gtm->tunnels; tr->tteid = t0->tteid; - } + tr->pdu_extension = t0->pdu_extension; + tr->qfi = t0->qfi; + } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); diff --git a/src/plugins/gtpu/gtpu_error.def b/src/plugins/gtpu/gtpu_error.def index 4351529ef25..6b521c8658a 100644 --- a/src/plugins/gtpu/gtpu_error.def +++ b/src/plugins/gtpu/gtpu_error.def @@ -17,3 +17,5 @@ gtpu_error (NO_SUCH_TUNNEL, "no such tunnel packets") gtpu_error (BAD_VER, "packets with bad version in gtpu header") gtpu_error (BAD_FLAGS, "packets with bad flags field in gtpu header") gtpu_error (TOO_SMALL, "packet too small to fit a gtpu header") +gtpu_error (UNSUPPORTED_TYPE, "packets with message type < 255 in gtpu header") +gtpu_error (NO_ERROR_TUNNEL, "did not find an forward tunnel") diff --git a/src/plugins/gtpu/gtpu_test.c b/src/plugins/gtpu/gtpu_test.c index dcfe3d02666..fadcb82cb88 100644 --- a/src/plugins/gtpu/gtpu_test.c +++ b/src/plugins/gtpu/gtpu_test.c @@ -298,9 +298,9 @@ api_gtpu_add_del_tunnel (vat_main_t * vam) unformat_gtpu_decap_next, &decap_next_index)) ; else if (unformat (line_input, "teid %d", &teid)) - ; + ; else if (unformat (line_input, "tteid %d", &tteid)) - ; + ; else { errmsg ("parse error '%U'", 
format_unformat_error, line_input); @@ -360,6 +360,175 @@ api_gtpu_add_del_tunnel (vat_main_t * vam) return ret; } +static void +vl_api_gtpu_add_del_tunnel_v2_reply_t_handler ( + vl_api_gtpu_add_del_tunnel_v2_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + vam->sw_if_index = ntohl (mp->sw_if_index); + vam->result_ready = 1; + } +} + +static int +api_gtpu_add_del_tunnel_v2 (vat_main_t *vam) +{ + unformat_input_t *line_input = vam->input; + vl_api_gtpu_add_del_tunnel_v2_t *mp; + ip46_address_t src, dst; + u8 is_add = 1; + u8 ipv4_set = 0, ipv6_set = 0; + u8 src_set = 0; + u8 dst_set = 0; + u8 grp_set = 0; + u32 mcast_sw_if_index = ~0; + u32 encap_vrf_id = 0; + u32 decap_next_index = ~0; + u32 teid = 0, tteid = 0; + u8 pdu_extension = 0; + u32 qfi = 0; + int ret; + + /* Can't "universally zero init" (={0}) due to GCC bug 53119 */ + clib_memset (&src, 0, sizeof src); + clib_memset (&dst, 0, sizeof dst); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "src %U", unformat_ip4_address, &src.ip4)) + { + ipv4_set = 1; + src_set = 1; + } + else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst.ip4)) + { + ipv4_set = 1; + dst_set = 1; + } + else if (unformat (line_input, "src %U", unformat_ip6_address, &src.ip6)) + { + ipv6_set = 1; + src_set = 1; + } + else if (unformat (line_input, "dst %U", unformat_ip6_address, &dst.ip6)) + { + ipv6_set = 1; + dst_set = 1; + } + else if (unformat (line_input, "group %U %U", unformat_ip4_address, + &dst.ip4, api_unformat_sw_if_index, vam, + &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "group %U", unformat_ip4_address, + &dst.ip4)) + { + grp_set = dst_set = 1; + ipv4_set = 1; + } + else if (unformat (line_input, "group %U %U", 
unformat_ip6_address, + &dst.ip6, api_unformat_sw_if_index, vam, + &mcast_sw_if_index)) + { + grp_set = dst_set = 1; + ipv6_set = 1; + } + else if (unformat (line_input, "group %U", unformat_ip6_address, + &dst.ip6)) + { + grp_set = dst_set = 1; + ipv6_set = 1; + } + else if (unformat (line_input, "mcast_sw_if_index %u", + &mcast_sw_if_index)) + ; + else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id)) + ; + else if (unformat (line_input, "decap-next %U", unformat_gtpu_decap_next, + &decap_next_index)) + ; + else if (unformat (line_input, "teid %d", &teid)) /* Change to %u ? */ + ; + else if (unformat (line_input, "tteid %d", &tteid)) /* Change to %u ? */ + ; + else if (unformat (line_input, "qfi %u", &qfi)) + pdu_extension = 1; + else + { + errmsg ("parse error '%U'", format_unformat_error, line_input); + return -99; + } + } + + if (is_add && src_set == 0) + { + errmsg ("tunnel src address not specified"); + return -99; + } + if (dst_set == 0) + { + errmsg ("tunnel dst address not specified"); + return -99; + } + + if (grp_set && !ip46_address_is_multicast (&dst)) + { + errmsg ("tunnel group address not multicast"); + return -99; + } + if (grp_set && mcast_sw_if_index == ~0) + { + errmsg ("tunnel nonexistent multicast device"); + return -99; + } + if (grp_set == 0 && ip46_address_is_multicast (&dst)) + { + errmsg ("tunnel dst address must be unicast"); + return -99; + } + + if (ipv4_set && ipv6_set) + { + errmsg ("both IPv4 and IPv6 addresses specified"); + return -99; + } + if (qfi > 31) + { + errmsg ("qfi max value is 31"); + return -99; + } + + M (GTPU_ADD_DEL_TUNNEL_V2, mp); + + ip_address_encode (&src, ipv6_set ? IP46_TYPE_IP6 : IP46_TYPE_IP4, + &mp->src_address); + ip_address_encode (&dst, ipv6_set ? 
IP46_TYPE_IP6 : IP46_TYPE_IP4, + &mp->dst_address); + mp->encap_vrf_id = ntohl (encap_vrf_id); + mp->decap_next_index = ntohl (decap_next_index); + mp->mcast_sw_if_index = ntohl (mcast_sw_if_index); + mp->teid = ntohl (teid); + mp->tteid = ntohl (tteid); + mp->is_add = is_add; + mp->pdu_extension = pdu_extension; + mp->qfi = ntohl (qfi); + + S (mp); + W (ret); + return ret; +} static int api_gtpu_tunnel_update_tteid (vat_main_t * vam) { @@ -436,6 +605,40 @@ static void vl_api_gtpu_tunnel_details_t_handler ntohl (mp->mcast_sw_if_index)); } +static void +vl_api_gtpu_tunnel_v2_details_t_handler (vl_api_gtpu_tunnel_v2_details_t *mp) +{ + vat_main_t *vam = &vat_main; + ip46_address_t src; + ip46_address_t dst; + ip_address_decode (&mp->dst_address, &dst); + ip_address_decode (&mp->src_address, &src); + print (vam->ofp, "%11d%24U%24U%14d%18d%13d%13d%19d%15d%5d%15d%17d", + ntohl (mp->sw_if_index), format_ip46_address, &src, IP46_TYPE_ANY, + format_ip46_address, &dst, IP46_TYPE_ANY, ntohl (mp->encap_vrf_id), + ntohl (mp->decap_next_index), ntohl (mp->teid), ntohl (mp->tteid), + ntohl (mp->mcast_sw_if_index), mp->pdu_extension, mp->qfi, + mp->is_forwarding, ntohl (mp->forwarding_type)); +} + +static void +vl_api_gtpu_add_del_forward_reply_t_handler ( + vl_api_gtpu_add_del_forward_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + vam->sw_if_index = ntohl (mp->sw_if_index); + vam->result_ready = 1; + } +} + static int api_gtpu_tunnel_dump (vat_main_t * vam) { @@ -480,4 +683,163 @@ api_gtpu_tunnel_dump (vat_main_t * vam) return 0; } +static int +api_gtpu_tunnel_v2_dump (vat_main_t *vam) +{ + unformat_input_t *i = vam->input; + vl_api_gtpu_tunnel_dump_t *mp; + u32 sw_if_index; + u8 sw_if_index_set = 0; + + /* Parse args required to build the message */ + while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) + { + if (unformat (i, 
"sw_if_index %d", &sw_if_index)) + sw_if_index_set = 1; + else + break; + } + + if (sw_if_index_set == 0) + { + sw_if_index = ~0; + } + + if (!vam->json_output) + { + print (vam->ofp, "%11s%24s%24s%14s%18s%13s%13s%19s%12s%5s%15s%17s", + "sw_if_index", "src_address", "dst_address", "encap_vrf_id", + "decap_next_index", "teid", "tteid", "mcast_sw_if_index", + "pdu_extension", "qfi", "is_forwarding", "forwarding_type"); + } + + /* Get list of gtpu-tunnel interfaces */ + M (GTPU_TUNNEL_DUMP, mp); + + mp->sw_if_index = htonl (sw_if_index); + + S (mp); + + /* No status response for this API call. + * Wait 1 sec for any dump output before return to vat# */ + sleep (1); + + return 0; +} + +static int +api_gtpu_add_del_forward (vat_main_t *vam) +{ + unformat_input_t *line_input = vam->input; + vl_api_gtpu_add_del_forward_t *mp; + int ret; + u32 decap_next_index = GTPU_INPUT_NEXT_L2_INPUT; + int is_add = 1; + ip46_address_t dst; + u8 dst_set = 0; + u8 type = 0; + u8 ipv6_set = 0; + u32 encap_vrf_id; + + /* Cant "universally zero init" (={0}) due to GCC bug 53119 */ + clib_memset (&dst, 0, sizeof dst); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "dst %U", unformat_ip4_address, &dst.ip4)) + dst_set = 1; + else if (unformat (line_input, "dst %U", unformat_ip6_address, &dst.ip6)) + { + dst_set = 1; + ipv6_set = 1; + } + else if (unformat (line_input, "decap-next %U", unformat_gtpu_decap_next, + &decap_next_index)) + ; + else if (unformat (line_input, "encap-vrf-id %d", &encap_vrf_id)) + ; + else if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "bad-header")) + type |= GTPU_FORWARD_BAD_HEADER; + else if (unformat (line_input, "unknown-teid")) + type |= GTPU_FORWARD_UNKNOWN_TEID; + else if (unformat (line_input, "unknown-type")) + type |= GTPU_FORWARD_UNKNOWN_TYPE; + else + { + errmsg ("parse error '%U'", format_unformat_error, line_input); + return -99; + } + } + + if (!dst_set) + { + 
errmsg ("dst must be set to a valid IP address"); + return -99; + } + + M (GTPU_ADD_DEL_FORWARD, mp); + + mp->is_add = is_add; + ip_address_encode (&dst, ipv6_set ? IP46_TYPE_IP6 : IP46_TYPE_IP4, + &mp->dst_address); + mp->forwarding_type = type; + mp->encap_vrf_id = ntohl (encap_vrf_id); + mp->decap_next_index = ntohl (decap_next_index); + + S (mp); + W (ret); + return ret; +} + +static int +api_gtpu_get_transfer_counts (vat_main_t *vam) +{ + unformat_input_t *line_input = vam->input; + vl_api_gtpu_get_transfer_counts_t *mp; + u32 start_index; + u32 capacity; + int ret; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "start_index %u", &start_index)) + ; + else if (unformat (line_input, "capacity %u", &capacity)) + ; + else + { + errmsg ("parse error '%U'", format_unformat_error, line_input); + return -99; + } + } + + M (GTPU_GET_TRANSFER_COUNTS, mp); + mp->sw_if_index_start = start_index; + mp->capacity = capacity; + + S (mp); // TODO: Handle the prints somehow. But how is it done?? + W (ret); + return ret; +} + +static void +vl_api_gtpu_get_transfer_counts_reply_t_handler ( + vl_api_gtpu_get_transfer_counts_reply_t *mp) +{ + vat_main_t *vam = &vat_main; + i32 retval = ntohl (mp->retval); + if (vam->async_mode) + { + vam->async_errors += (retval < 0); + } + else + { + vam->retval = retval; + // TODO: Add reply here? + vam->result_ready = 1; + } +} + #include <gtpu/gtpu.api_test.c> |